diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index d6ad2d3b5a..455d0659b4 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -29,14 +29,14 @@ schedules:
always: true
branches:
include:
+ - stable-11
- stable-10
- - stable-9
- cron: 0 11 * * 0
displayName: Weekly (old stable branches)
always: true
branches:
include:
- - stable-8
+ - stable-9
variables:
- name: checkoutPath
@@ -51,12 +51,25 @@ variables:
resources:
containers:
- container: default
- image: quay.io/ansible/azure-pipelines-test-container:6.0.0
+ image: quay.io/ansible/azure-pipelines-test-container:7.0.0
pool: Standard
stages:
### Sanity
+ - stage: Sanity_devel
+ displayName: Sanity devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: devel/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
- stage: Sanity_2_19
displayName: Sanity 2.19
dependsOn: []
@@ -96,20 +109,22 @@ stages:
- test: 2
- test: 3
- test: 4
- - stage: Sanity_2_16
- displayName: Sanity 2.16
+### Units
+ - stage: Units_devel
+ displayName: Units devel
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- nameFormat: Test {0}
- testFormat: 2.16/sanity/{0}
+ nameFormat: Python {0}
+ testFormat: devel/units/{0}/1
targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
-### Units
+ - test: 3.9
+ - test: '3.10'
+ - test: '3.11'
+ - test: '3.12'
+ - test: '3.13'
+ - test: '3.14'
- stage: Units_2_19
displayName: Units 2.19
dependsOn: []
@@ -120,11 +135,8 @@ stages:
testFormat: 2.19/units/{0}/1
targets:
- test: 3.8
- - test: 3.9
- - test: '3.10'
- - test: '3.11'
- - test: '3.12'
- - test: '3.13'
+ - test: "3.11"
+ - test: "3.13"
- stage: Units_2_18
displayName: Units 2.18
dependsOn: []
@@ -135,6 +147,7 @@ stages:
testFormat: 2.18/units/{0}/1
targets:
- test: 3.8
+ - test: "3.11"
- test: "3.13"
- stage: Units_2_17
displayName: Units 2.17
@@ -146,39 +159,50 @@ stages:
testFormat: 2.17/units/{0}/1
targets:
- test: 3.7
+ - test: "3.10"
- test: "3.12"
- - stage: Units_2_16
- displayName: Units 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.16/units/{0}/1
- targets:
- - test: 2.7
- - test: 3.6
- - test: "3.11"
## Remote
- - stage: Remote_2_19_extra_vms
- displayName: Remote 2.19 extra VMs
+ - stage: Remote_devel_extra_vms
+ displayName: Remote devel extra VMs
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.19/{0}
+ testFormat: devel/{0}
targets:
- - name: Alpine 3.21
- test: alpine/3.21
- # - name: Fedora 41
- # test: fedora/41
+ - name: Alpine 3.22
+ test: alpine/3.22
+ # - name: Fedora 42
+ # test: fedora/42
- name: Ubuntu 22.04
test: ubuntu/22.04
- name: Ubuntu 24.04
test: ubuntu/24.04
groups:
- vm
+ - stage: Remote_devel
+ displayName: Remote devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: macOS 15.3
+ test: macos/15.3
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: RHEL 9.6
+ test: rhel/9.6
+ - name: FreeBSD 14.3
+ test: freebsd/14.3
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
+ groups:
+ - 1
+ - 2
+ - 3
- stage: Remote_2_19
displayName: Remote 2.19
dependsOn: []
@@ -187,16 +211,12 @@ stages:
parameters:
testFormat: 2.19/{0}
targets:
- - name: macOS 15.3
- test: macos/15.3
- - name: RHEL 10.0
- test: rhel/10.0
- name: RHEL 9.5
test: rhel/9.5
+ - name: RHEL 10.0
+ test: rhel/10.0
- name: FreeBSD 14.2
test: freebsd/14.2
- - name: FreeBSD 13.5
- test: freebsd/13.5
groups:
- 1
- 2
@@ -227,38 +247,36 @@ stages:
parameters:
testFormat: 2.17/{0}
targets:
- - name: FreeBSD 13.3
- test: freebsd/13.3
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
- name: RHEL 9.3
test: rhel/9.3
groups:
- 1
- 2
- 3
- - stage: Remote_2_16
- displayName: Remote 2.16
+
+### Docker
+ - stage: Docker_devel
+ displayName: Docker devel
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.16/{0}
+ testFormat: devel/linux/{0}
targets:
- - name: macOS 13.2
- test: macos/13.2
- - name: RHEL 9.2
- test: rhel/9.2
- - name: RHEL 8.8
- test: rhel/8.8
- - name: RHEL 7.9
- test: rhel/7.9
- # - name: FreeBSD 13.2
- # test: freebsd/13.2
+ - name: Fedora 42
+ test: fedora42
+ - name: Alpine 3.22
+ test: alpine322
+ - name: Ubuntu 22.04
+ test: ubuntu2204
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 1
- 2
- 3
-
-### Docker
- stage: Docker_2_19
displayName: Docker 2.19
dependsOn: []
@@ -271,10 +289,6 @@ stages:
test: fedora41
- name: Alpine 3.21
test: alpine321
- - name: Ubuntu 22.04
- test: ubuntu2204
- - name: Ubuntu 24.04
- test: ubuntu2404
groups:
- 1
- 2
@@ -315,35 +329,15 @@ stages:
- 1
- 2
- 3
- - stage: Docker_2_16
- displayName: Docker 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.16/linux/{0}
- targets:
- - name: Fedora 38
- test: fedora38
- - name: openSUSE 15
- test: opensuse15
- - name: Alpine 3
- test: alpine3
- - name: CentOS 7
- test: centos7
- groups:
- - 1
- - 2
- - 3
### Community Docker
- - stage: Docker_community_2_19
- displayName: Docker (community images) 2.19
+ - stage: Docker_community_devel
+ displayName: Docker (community images) devel
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.19/linux-community/{0}
+ testFormat: devel/linux-community/{0}
targets:
- name: Debian Bullseye
test: debian-bullseye/3.9
@@ -358,6 +352,18 @@ stages:
### Generic
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+# - stage: Generic_devel
+# displayName: Generic devel
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: devel/generic/{0}/1
+# targets:
+# - test: '3.9'
+# - test: '3.12'
+# - test: '3.14'
# - stage: Generic_2_19
# displayName: Generic 2.19
# dependsOn: []
@@ -367,8 +373,7 @@ stages:
# nameFormat: Python {0}
# testFormat: 2.19/generic/{0}/1
# targets:
-# - test: '3.8'
-# - test: '3.11'
+# - test: '3.9'
# - test: '3.13'
# - stage: Generic_2_18
# displayName: Generic 2.18
@@ -392,44 +397,32 @@ stages:
# targets:
# - test: '3.7'
# - test: '3.12'
-# - stage: Generic_2_16
-# displayName: Generic 2.16
-# dependsOn: []
-# jobs:
-# - template: templates/matrix.yml
-# parameters:
-# nameFormat: Python {0}
-# testFormat: 2.16/generic/{0}/1
-# targets:
-# - test: '2.7'
-# - test: '3.6'
-# - test: '3.11'
- stage: Summary
condition: succeededOrFailed()
dependsOn:
+ - Sanity_devel
- Sanity_2_19
- Sanity_2_18
- Sanity_2_17
- - Sanity_2_16
+ - Units_devel
- Units_2_19
- Units_2_18
- Units_2_17
- - Units_2_16
- - Remote_2_19_extra_vms
+ - Remote_devel_extra_vms
+ - Remote_devel
- Remote_2_19
- Remote_2_18
- Remote_2_17
- - Remote_2_16
+ - Docker_devel
- Docker_2_19
- Docker_2_18
- Docker_2_17
- - Docker_2_16
- - Docker_community_2_19
+ - Docker_community_devel
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+# - Generic_devel
# - Generic_2_19
# - Generic_2_18
# - Generic_2_17
-# - Generic_2_16
jobs:
- template: templates/coverage.yml
diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml
index 4876375855..49f5d8595a 100644
--- a/.azure-pipelines/templates/matrix.yml
+++ b/.azure-pipelines/templates/matrix.yml
@@ -50,11 +50,11 @@ jobs:
parameters:
jobs:
- ${{ if eq(length(parameters.groups), 0) }}:
- - ${{ each target in parameters.targets }}:
- - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
- test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
- - ${{ if not(eq(length(parameters.groups), 0)) }}:
- - ${{ each group in parameters.groups }}:
- ${{ each target in parameters.targets }}:
- - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
- test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml
index 700cf629d7..b263379c06 100644
--- a/.azure-pipelines/templates/test.yml
+++ b/.azure-pipelines/templates/test.yml
@@ -14,37 +14,37 @@ parameters:
jobs:
- ${{ each job in parameters.jobs }}:
- - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
- displayName: ${{ job.name }}
- container: default
- workspace:
- clean: all
- steps:
- - checkout: self
- fetchDepth: $(fetchDepth)
- path: $(checkoutPath)
- - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
- displayName: Run Tests
- - bash: .azure-pipelines/scripts/process-results.sh
- condition: succeededOrFailed()
- displayName: Process Results
- - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Aggregate Coverage Data
- - task: PublishTestResults@2
- condition: eq(variables.haveTestResults, 'true')
- inputs:
- testResultsFiles: "$(outputPath)/junit/*.xml"
- displayName: Publish Test Results
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveBotResults, 'true')
- displayName: Publish Bot Results
- inputs:
- targetPath: "$(outputPath)/bot/"
- artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Publish Coverage Data
- inputs:
- targetPath: "$(Agent.TempDirectory)/coverage/"
- artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..cd4bdfee65
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# YAML reformatting
+d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0
+e8f965fbf8154ea177c6622da149f2ae8533bd3c
+e938ca5f20651abc160ee6aba10014013d04dcc1
+eaa5e07b2866e05b6c7b5628ca92e9cb1142d008
diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml
index 6827441a4c..fac3fae8f8 100644
--- a/.github/BOTMETA.yml
+++ b/.github/BOTMETA.yml
@@ -61,7 +61,6 @@ files:
$callbacks/elastic.py:
keywords: apm observability
maintainers: v1v
- $callbacks/hipchat.py: {}
$callbacks/jabber.py: {}
$callbacks/log_plays.py: {}
$callbacks/loganalytics.py:
@@ -78,6 +77,8 @@ files:
$callbacks/opentelemetry.py:
keywords: opentelemetry observability
maintainers: v1v
+ $callbacks/print_task.py:
+ maintainers: demonpig
$callbacks/say.py:
keywords: brew cask darwin homebrew macosx macports osx
labels: macos say
@@ -91,6 +92,8 @@ files:
maintainers: ryancurrah
$callbacks/syslog_json.py:
maintainers: imjoseangel
+ $callbacks/tasks_only.py:
+ maintainers: felixfontein
$callbacks/timestamp.py:
maintainers: kurokobo
$callbacks/unixy.py:
@@ -117,6 +120,8 @@ files:
$connections/saltstack.py:
labels: saltstack
maintainers: mscherer
+ $connections/wsl.py:
+ maintainers: rgl
$connections/zone.py:
maintainers: $team_ansible_core
$doc_fragments/:
@@ -136,6 +141,8 @@ files:
$doc_fragments/xenserver.py:
labels: xenserver
maintainers: bvitnik
+ $filters/accumulate.py:
+ maintainers: VannTen
$filters/counter.py:
maintainers: keilr
$filters/crc32.py:
@@ -158,6 +165,14 @@ files:
maintainers: Ajpantuso
$filters/jc.py:
maintainers: kellyjonbrazil
+ $filters/json_diff.yml:
+ maintainers: numo68
+ $filters/json_patch.py:
+ maintainers: numo68
+ $filters/json_patch.yml:
+ maintainers: numo68
+ $filters/json_patch_recipe.yml:
+ maintainers: numo68
$filters/json_query.py: {}
$filters/keep_keys.py:
maintainers: vbotka
@@ -194,6 +209,8 @@ files:
maintainers: resmo
$filters/to_months.yml:
maintainers: resmo
+ $filters/to_prettytable.py:
+ maintainers: tgadiev
$filters/to_seconds.yml:
maintainers: resmo
$filters/to_time_unit.yml:
@@ -212,6 +229,8 @@ files:
maintainers: opoplawski
$inventories/gitlab_runners.py:
maintainers: morph027
     $inventories/icinga2.py:
       maintainers: BongoEADGC6
+    $inventories/iocage.py:
+      maintainers: vbotka
$inventories/linode.py:
@@ -227,13 +246,9 @@ files:
keywords: opennebula dynamic inventory script
labels: cloud opennebula
maintainers: feldsam
- $inventories/proxmox.py:
- maintainers: $team_virt ilijamt krauthosting
$inventories/scaleway.py:
labels: cloud scaleway
maintainers: $team_scaleway
- $inventories/stackpath_compute.py:
- maintainers: shayrybak
$inventories/virtualbox.py: {}
$inventories/xen_orchestra.py:
maintainers: ddelnano shinuza
@@ -277,9 +292,6 @@ files:
$lookups/lastpass.py: {}
$lookups/lmdb_kv.py:
maintainers: jpmens
- $lookups/manifold.py:
- labels: manifold
- maintainers: galanoff
$lookups/merge_variables.py:
maintainers: rlenferink m-a-r-k-e alpex8
$lookups/onepass:
@@ -291,6 +303,8 @@ files:
$lookups/onepassword_raw.py:
ignore: scottsb
maintainers: azenk
+ $lookups/onepassword_ssh_key.py:
+ maintainers: mohammedbabelly20
$lookups/passwordstore.py: {}
$lookups/random_pet.py:
maintainers: Akasurde
@@ -308,8 +322,12 @@ files:
maintainers: delineaKrehl tylerezimmerman
$module_utils/:
labels: module_utils
+ $module_utils/android_sdkmanager.py:
+ maintainers: shamilovstas
$module_utils/btrfs.py:
maintainers: gnfzdz
+ $module_utils/cmd_runner_fmt.py:
+ maintainers: russoz
$module_utils/cmd_runner.py:
maintainers: russoz
$module_utils/deps.py:
@@ -356,9 +374,13 @@ files:
$module_utils/oracle/oci_utils.py:
labels: cloud
maintainers: $team_oracle
+ $module_utils/pacemaker.py:
+ maintainers: munchtoast
$module_utils/pipx.py:
labels: pipx
maintainers: russoz
+ $module_utils/pkg_req.py:
+ maintainers: russoz
$module_utils/python_runner.py:
maintainers: russoz
$module_utils/puppet.py:
@@ -380,6 +402,8 @@ files:
maintainers: russoz
$module_utils/ssh.py:
maintainers: russoz
+ $module_utils/systemd.py:
+ maintainers: NomakCooper
$module_utils/storage/hpe3par/hpe3par.py:
maintainers: farhan7500 gautamphegde
$module_utils/utm_utils.py:
@@ -391,6 +415,8 @@ files:
$module_utils/wdc_redfish_utils.py:
labels: wdc_redfish_utils
maintainers: $team_wdc
+ $module_utils/xdg_mime.py:
+ maintainers: mhalano
$module_utils/xenserver.py:
labels: xenserver
maintainers: bvitnik
@@ -417,6 +443,8 @@ files:
ignore: DavidWittman jiuka
labels: alternatives
maintainers: mulby
+ $modules/android_sdk.py:
+ maintainers: shamilovstas
$modules/ansible_galaxy_install.py:
maintainers: russoz
$modules/apache2_mod_proxy.py:
@@ -472,8 +500,6 @@ files:
maintainers: NickatEpic
$modules/cisco_webex.py:
maintainers: drew-russell
- $modules/clc_:
- maintainers: clc-runner
$modules/cloud_init_data_facts.py:
maintainers: resmo
$modules/cloudflare_dns.py:
@@ -505,6 +531,8 @@ files:
ignore: skornehl
$modules/dconf.py:
maintainers: azaghal
+ $modules/decompress.py:
+ maintainers: shamilovstas
$modules/deploy_helper.py:
maintainers: ramondelafuente
$modules/dimensiondata_network.py:
@@ -630,8 +658,6 @@ files:
maintainers: marns93
$modules/hg.py:
maintainers: yeukhon
- $modules/hipchat.py:
- maintainers: pb8226 shirou
$modules/homebrew.py:
ignore: ryansb
keywords: brew cask darwin homebrew macosx macports osx
@@ -757,10 +783,14 @@ files:
maintainers: brettmilford unnecessary-username juanmcasanova
$modules/jenkins_build_info.py:
maintainers: juanmcasanova
+ $modules/jenkins_credential.py:
+ maintainers: YoussefKhalidAli
$modules/jenkins_job.py:
maintainers: sermilrod
$modules/jenkins_job_info.py:
maintainers: stpierre
+ $modules/jenkins_node.py:
+ maintainers: phyrwork
$modules/jenkins_plugin.py:
maintainers: jtyr
$modules/jenkins_script.py:
@@ -797,6 +827,8 @@ files:
maintainers: fynncfchen johncant
$modules/keycloak_clientsecret_regenerate.py:
maintainers: fynncfchen johncant
+ $modules/keycloak_component.py:
+ maintainers: fivetide
$modules/keycloak_group.py:
maintainers: adamgoossens
$modules/keycloak_identity_provider.py:
@@ -829,6 +861,8 @@ files:
maintainers: ahussey-redhat
$modules/kibana_plugin.py:
maintainers: barryib
+ $modules/krb_ticket.py:
+ maintainers: abakanovskii
$modules/launchd.py:
maintainers: martinm82
$modules/layman.py:
@@ -839,6 +873,8 @@ files:
maintainers: drybjed jtyr noles
$modules/ldap_entry.py:
maintainers: jtyr
+ $modules/ldap_inc.py:
+ maintainers: pduveau
$modules/ldap_passwd.py:
maintainers: KellerFuchs jtyr
$modules/ldap_search.py:
@@ -865,6 +901,8 @@ files:
maintainers: nerzhul
$modules/lvg.py:
maintainers: abulimov
+ $modules/lvm_pv.py:
+ maintainers: klention
$modules/lvg_rename.py:
maintainers: lszomor
$modules/lvol.py:
@@ -1019,7 +1057,9 @@ files:
$modules/ovh_monthly_billing.py:
maintainers: fraff
$modules/pacemaker_cluster.py:
- maintainers: matbu
+ maintainers: matbu munchtoast
+ $modules/pacemaker_resource.py:
+ maintainers: munchtoast
$modules/packet_:
maintainers: nurfet-becirevic t0mk
$modules/packet_device.py:
@@ -1095,32 +1135,6 @@ files:
maintainers: $team_bsd berenddeboer
$modules/pritunl_:
maintainers: Lowess
- $modules/profitbricks:
- maintainers: baldwinSPC
- $modules/proxmox:
- keywords: kvm libvirt proxmox qemu
- labels: proxmox virt
- maintainers: $team_virt UnderGreen krauthosting
- ignore: tleguern
- $modules/proxmox.py:
- ignore: skvidal
- maintainers: UnderGreen krauthosting
- $modules/proxmox_disk.py:
- maintainers: castorsky krauthosting
- $modules/proxmox_kvm.py:
- ignore: skvidal
- maintainers: helldorado krauthosting
- $modules/proxmox_nic.py:
- maintainers: Kogelvis krauthosting
- $modules/proxmox_node_info.py:
- maintainers: jwbernin krauthosting
- $modules/proxmox_storage_contents_info.py:
- maintainers: l00ptr krauthosting
- $modules/proxmox_tasks_info:
- maintainers: paginabianca krauthosting
- $modules/proxmox_template.py:
- ignore: skvidal
- maintainers: UnderGreen krauthosting
$modules/pubnub_blocks.py:
maintainers: parfeon pubnub
$modules/pulp_repo.py:
@@ -1159,12 +1173,6 @@ files:
keywords: kvm libvirt proxmox qemu
labels: rhevm virt
maintainers: $team_virt TimothyVandenbrande
- $modules/rhn_channel.py:
- labels: rhn_channel
- maintainers: vincentvdk alikins $team_rhn
- $modules/rhn_register.py:
- labels: rhn_register
- maintainers: jlaska $team_rhn
$modules/rhsm_release.py:
maintainers: seandst $team_rhsm
$modules/rhsm_repository.py:
@@ -1197,9 +1205,9 @@ files:
$modules/scaleway_compute_private_network.py:
maintainers: pastral
$modules/scaleway_container.py:
- maintainers: Lunik
+ maintainers: Lunik
$modules/scaleway_container_info.py:
- maintainers: Lunik
+ maintainers: Lunik
$modules/scaleway_container_namespace.py:
maintainers: Lunik
$modules/scaleway_container_namespace_info.py:
@@ -1328,6 +1336,12 @@ files:
maintainers: precurse
$modules/sysrc.py:
maintainers: dlundgren
+ $modules/systemd_creds_decrypt.py:
+ maintainers: konstruktoid
+ $modules/systemd_creds_encrypt.py:
+ maintainers: konstruktoid
+ $modules/systemd_info.py:
+ maintainers: NomakCooper
$modules/sysupgrade.py:
maintainers: precurse
$modules/taiga_issue.py:
@@ -1401,6 +1415,8 @@ files:
maintainers: dinoocch the-maldridge
$modules/xcc_:
maintainers: panyy3 renxulei
+ $modules/xdg_mime.py:
+ maintainers: mhalano
$modules/xenserver_:
maintainers: bvitnik
$modules/xenserver_facts.py:
@@ -1433,6 +1449,8 @@ files:
maintainers: natefoo
$modules/znode.py:
maintainers: treyperry
+ $modules/zpool.py:
+ maintainers: tomhesse
$modules/zpool_facts:
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
labels: solaris
@@ -1447,6 +1465,9 @@ files:
       maintainers: $team_suse
+    $modules/zypper_repository_info.py:
+      labels: zypper
+      maintainers: $team_suse TobiasZeuch181
     $plugin_utils/ansible_type.py:
       maintainers: vbotka
$plugin_utils/keys_filter.py:
maintainers: vbotka
$plugin_utils/unsafe.py:
@@ -1496,6 +1517,22 @@ files:
maintainers: russoz
docs/docsite/rst/guide_deps.rst:
maintainers: russoz
+ docs/docsite/rst/guide_iocage.rst:
+ maintainers: russoz felixfontein
+ docs/docsite/rst/guide_iocage_inventory.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_aliases.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_basics.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_dhcp.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_hooks.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_properties.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_tags.rst:
+ maintainers: vbotka
docs/docsite/rst/guide_modulehelper.rst:
maintainers: russoz
docs/docsite/rst/guide_online.rst:
@@ -1504,6 +1541,8 @@ files:
maintainers: baldwinSPC nurfet-becirevic t0mk teebes
docs/docsite/rst/guide_scaleway.rst:
maintainers: $team_scaleway
+ docs/docsite/rst/guide_uthelper.rst:
+ maintainers: russoz
docs/docsite/rst/guide_vardict.rst:
maintainers: russoz
docs/docsite/rst/test_guide.rst:
@@ -1555,7 +1594,6 @@ macros:
team_oracle: manojmeda mross22 nalsaber
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
- team_rhn: FlossWare alikins barnabycourt vritant
team_rhsm: cnsnyder ptoscano
team_scaleway: remyleone abarbare
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index f64de2abe3..4b1c1bfb95 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -7,147 +7,147 @@ name: Bug report
description: Create a report to help us improve
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Explain the problem briefly below.
- placeholder: >-
- When I try to do X with the collection from the main branch on GitHub, Y
- breaks in a way Z under the env E. Here are all the details I know
- about this problem...
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Bug Report
- validations:
- required: true
-
-- type: textarea
- attributes:
- # For smaller collections we could use a multi-select and hardcode the list
- # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
- # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
- # OR freeform - doesn't seem to be supported in adaptivecards
- label: Component Name
- description: >-
- Write the short name of the module, plugin, task or feature below,
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
- This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
- Paste verbatim output from `ansible-config dump --only-changed` between quotes
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
-
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. target OS versions,
- network device firmware, etc.
- placeholder: RHEL 8, CentOS Stream etc.
- validations:
- required: false
-
-
-- type: textarea
- attributes:
- label: Steps to Reproduce
- description: |
- Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Expected Results
- description: >-
- Describe what you expected to happen when running the steps above.
- placeholder: >-
- I expected X to happen because I assumed Y.
- that it did not.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Actual Results
- description: |
- Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
-
- Paste verbatim command output between quotes.
- value: |
- ```console (paste below)
-
- ```
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with the collection from the main branch on GitHub, Y
+ breaks in a way Z under the env E. Here are all the details I know
+ about this problem...
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Bug Report
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ # For smaller collections we could use a multi-select and hardcode the list
+ # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
+ # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
+ # OR freeform - doesn't seem to be supported in adaptivecards
+ label: Component Name
+ description: >-
+ Write the short name of the module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+        triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+        between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
+ This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. target OS versions,
+ network device firmware, etc.
+ placeholder: RHEL 8, CentOS Stream etc.
+ validations:
+ required: false
+
+
+ - type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+        Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+        I expected X to happen because I assumed Y,
+        but it did not.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output between quotes.
+ value: |
+ ```console (paste below)
+
+ ```
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 0cc2db058c..476eed516e 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -6,26 +6,26 @@
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false # default: true
contact_links:
-- name: Security bug report
- url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: |
- Please learn how to report security vulnerabilities here.
+ - name: Security bug report
+ url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: |
+ Please learn how to report security vulnerabilities here.
- For all security related bugs, email security@ansible.com
- instead of using this issue tracker and you will receive
- a prompt response.
+ For all security related bugs, email security@ansible.com
+ instead of using this issue tracker and you will receive
+ a prompt response.
- For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
-- name: Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Be nice to other members of the community.
-- name: Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
- about: Please ask and answer usage questions here
-- name: Working groups
- url: https://github.com/ansible/community/wiki
- about: Interested in improving a specific area? Become a part of a working group!
-- name: For Enterprise
- url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Red Hat offers support for the Ansible Automation Platform
+ For more information, see
+ https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+ - name: Ansible Code of Conduct
+ url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Be nice to other members of the community.
+ - name: Talks to the community
+ url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+ about: Please ask and answer usage questions here
+ - name: Working groups
+ url: https://github.com/ansible/community/wiki
+ about: Interested in improving a specific area? Become a part of a working group!
+ - name: For Enterprise
+ url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Red Hat offers support for the Ansible Automation Platform
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index 6ec49fcb37..2ad4bce44a 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -8,122 +8,122 @@ description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: |
- Explain the problem briefly below, add suggestions to wording or structure.
+ - type: textarea
+ attributes:
+ label: Summary
+ description: |
+ Explain the problem briefly below, add suggestions to wording or structure.
- **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
- placeholder: >-
- I was reading the Collection documentation of version X and I'm having
- problems understanding Y. It would be very helpful if that got
- rephrased as Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Documentation Report
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the file, module, plugin, task or feature below,
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: mysql_user
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- Paste verbatim output from `ansible-config dump --only-changed` between quotes.
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. OS version,
- browser, etc.
- placeholder: Fedora 33, Firefox etc.
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how this improves the documentation, e.g. before/after situation or screenshots.
-
- **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- placeholder: >-
- When the improvement is applied, it makes it more straightforward
- to understand X.
- validations:
- required: false
-
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
+ placeholder: >-
+ I was reading the Collection documentation of version X and I'm having
+ problems understanding Y. It would be very helpful if that got
+ rephrased as Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Documentation Report
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the file, module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: mysql_user
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes.
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. OS version,
+ browser, etc.
+ placeholder: Fedora 33, Firefox etc.
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how this improves the documentation, e.g. before/after situation or screenshots.
+
+ **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ placeholder: >-
+ When the improvement is applied, it makes it more straightforward
+ to understand X.
+ validations:
+ required: false
+
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index f34564283c..dc62f94c5c 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -7,67 +7,67 @@ name: Feature request
description: Suggest an idea for this project
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Describe the new feature/improvement briefly below.
- placeholder: >-
- I am trying to do X with the collection from the main branch on GitHub and
- I think that implementing a feature Y would be very helpful for me and
- every other user of community.general because of Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Feature Idea
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how the feature would be used, why it is needed and what it would solve.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: false
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Describe the new feature/improvement briefly below.
+ placeholder: >-
+ I am trying to do X with the collection from the main branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of community.general because of Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Feature Idea
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: false
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 2f4ff900d8..f71b322d2a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,3 +9,7 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+ groups:
+ ci:
+ patterns:
+ - "*"
diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml
index 3ee0689241..89b36e6163 100644
--- a/.github/workflows/ansible-test.yml
+++ b/.github/workflows/ansible-test.yml
@@ -7,7 +7,7 @@
# https://github.com/marketplace/actions/ansible-test
name: EOL CI
-on:
+"on":
# Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
push:
branches:
@@ -29,14 +29,7 @@ jobs:
strategy:
matrix:
ansible:
- - '2.13'
- - '2.14'
- - '2.15'
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
+ - '2.16'
runs-on: ubuntu-latest
steps:
- name: Perform sanity testing
@@ -51,11 +44,6 @@ jobs:
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
units:
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
runs-on: ubuntu-latest
name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
strategy:
@@ -69,20 +57,12 @@ jobs:
exclude:
- ansible: ''
include:
- - ansible: '2.13'
+ - ansible: '2.16'
python: '2.7'
- - ansible: '2.13'
- python: '3.8'
- - ansible: '2.13'
- python: '2.7'
- - ansible: '2.13'
- python: '3.8'
- - ansible: '2.14'
- python: '3.9'
- - ansible: '2.15'
- python: '3.5'
- - ansible: '2.15'
- python: '3.10'
+ - ansible: '2.16'
+ python: '3.6'
+ - ansible: '2.16'
+ python: '3.11'
steps:
- name: >-
@@ -102,11 +82,6 @@ jobs:
testing-type: units
integration:
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
runs-on: ubuntu-latest
name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
strategy:
@@ -123,81 +98,56 @@ jobs:
exclude:
- ansible: ''
include:
- # 2.13
- - ansible: '2.13'
- docker: fedora35
+ # 2.16
+ # CentOS 7 does not work in GHA, that's why it's not listed here.
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/1/
- - ansible: '2.13'
- docker: fedora35
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/2/
- - ansible: '2.13'
- docker: fedora35
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/3/
- - ansible: '2.13'
- docker: opensuse15py2
+ - ansible: '2.16'
+ docker: opensuse15
python: ''
target: azp/posix/1/
- - ansible: '2.13'
- docker: opensuse15py2
+ - ansible: '2.16'
+ docker: opensuse15
python: ''
target: azp/posix/2/
- - ansible: '2.13'
- docker: opensuse15py2
+ - ansible: '2.16'
+ docker: opensuse15
python: ''
target: azp/posix/3/
- - ansible: '2.13'
+ - ansible: '2.16'
docker: alpine3
python: ''
target: azp/posix/1/
- - ansible: '2.13'
+ - ansible: '2.16'
docker: alpine3
python: ''
target: azp/posix/2/
- - ansible: '2.13'
+ - ansible: '2.16'
docker: alpine3
python: ''
target: azp/posix/3/
- # 2.14
- - ansible: '2.14'
- docker: alpine3
- python: ''
- target: azp/posix/1/
- - ansible: '2.14'
- docker: alpine3
- python: ''
- target: azp/posix/2/
- - ansible: '2.14'
- docker: alpine3
- python: ''
- target: azp/posix/3/
- # 2.15
- - ansible: '2.15'
- docker: fedora37
- python: ''
- target: azp/posix/1/
- - ansible: '2.15'
- docker: fedora37
- python: ''
- target: azp/posix/2/
- - ansible: '2.15'
- docker: fedora37
- python: ''
- target: azp/posix/3/
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
- # - ansible: '2.13'
+ # - ansible: '2.16'
# docker: default
- # python: '3.9'
+ # python: '2.7'
# target: azp/generic/1/
- # - ansible: '2.14'
+ # - ansible: '2.16'
# docker: default
- # python: '3.10'
+ # python: '3.6'
# target: azp/generic/1/
- # - ansible: '2.15'
+ # - ansible: '2.16'
# docker: default
- # python: '3.9'
+ # python: '3.11'
# target: azp/generic/1/
steps:
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index e8572fafb6..ec344315bb 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -5,7 +5,7 @@
name: "Code scanning - action"
-on:
+"on":
schedule:
- cron: '26 19 * * 1'
workflow_dispatch:
@@ -23,16 +23,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- persist-credentials: false
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ persist-credentials: false
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v3
- with:
- languages: python
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: python
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000000..c10d86ab19
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+extends: default
+
+ignore: |
+ /changelogs/
+
+rules:
+ line-length:
+ max: 1000
+ level: error
+ document-start: disable
+ document-end: disable
+ truthy:
+ level: error
+ allowed-values:
+ - 'true'
+ - 'false'
+ indentation:
+ spaces: 2
+ indent-sequences: true
+ key-duplicates: enable
+ trailing-spaces: enable
+ new-line-at-end-of-file: disable
+ hyphens:
+ max-spaces-after: 1
+ empty-lines:
+ max: 2
+ max-start: 0
+ max-end: 0
+ commas:
+ max-spaces-before: 0
+ min-spaces-after: 1
+ max-spaces-after: 1
+ colons:
+ max-spaces-before: 0
+ max-spaces-after: 1
+ brackets:
+ min-spaces-inside: 0
+ max-spaces-inside: 0
+ braces:
+ min-spaces-inside: 0
+ max-spaces-inside: 1
+ octal-values:
+ forbid-implicit-octal: true
+ forbid-explicit-octal: true
+ comments:
+ min-spaces-from-content: 1
+ comments-indentation: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 52a7e52613..b35c52441b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,1023 +1,5 @@
-# Community General Release Notes
+# Placeholder changelog
-**Topics**
-
-- v9\.5\.9
- - Release Summary
- - Bugfixes
-- v9\.5\.8
- - Release Summary
- - Bugfixes
-- v9\.5\.7
- - Release Summary
- - Minor Changes
- - Bugfixes
- - Known Issues
-- v9\.5\.6
- - Release Summary
- - Minor Changes
- - Bugfixes
-- v9\.5\.5
- - Release Summary
- - Bugfixes
-- v9\.5\.4
- - Security Fixes
- - Bugfixes
-- v9\.5\.3
- - Release Summary
- - Minor Changes
- - Security Fixes
- - Bugfixes
-- v9\.5\.2
- - Release Summary
- - Minor Changes
- - Bugfixes
-- v9\.5\.1
- - Release Summary
- - Minor Changes
- - Bugfixes
-- v9\.5\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - New Modules
-- v9\.4\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - New Modules
-- v9\.3\.0
- - Release Summary
- - Minor Changes
- - Bugfixes
- - New Modules
-- v9\.2\.0
- - Release Summary
- - Minor Changes
- - Bugfixes
- - New Plugins
- - Filter
- - Test
-- v9\.1\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - Known Issues
- - New Plugins
- - Filter
- - New Modules
-- v9\.0\.1
- - Release Summary
- - Minor Changes
- - Bugfixes
-- v9\.0\.0
- - Release Summary
- - Minor Changes
- - Breaking Changes / Porting Guide
- - Deprecated Features
- - Removed Features \(previously deprecated\)
- - Security Fixes
- - Bugfixes
- - New Plugins
- - Become
- - Callback
- - Connection
- - Filter
- - Lookup
- - Test
- - New Modules
-This changelog describes changes after version 8\.0\.0\.
-
-
-## v9\.5\.9
-
-
-### Release Summary
-
-Bugfix release\.
-
-
-### Bugfixes
-
-* yaml callback plugin \- adjust to latest changes in ansible\-core devel \([https\://github\.com/ansible\-collections/community\.general/pull/10212](https\://github\.com/ansible\-collections/community\.general/pull/10212)\)\.
-* yaml callback plugin \- when using ansible\-core 2\.19\.0b2 or newer\, uses a new utility provided by ansible\-core\. This allows us to remove all hacks and vendored code that was part of the plugin for ansible\-core versions with Data Tagging so far \([https\://github\.com/ansible\-collections/community\.general/pull/10242](https\://github\.com/ansible\-collections/community\.general/pull/10242)\)\.
-
-
-## v9\.5\.8
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Bugfixes
-
-* cobbler\_system \- fix bug with Cobbler \>\= 3\.4\.0 caused by giving more than 2 positional arguments to CobblerXMLRPCInterface\.get\_system\_handle\(\)
\([https\://github\.com/ansible\-collections/community\.general/issues/8506](https\://github\.com/ansible\-collections/community\.general/issues/8506)\, [https\://github\.com/ansible\-collections/community\.general/pull/10145](https\://github\.com/ansible\-collections/community\.general/pull/10145)\)\.
-* kdeconfig \- allow option values beginning with a dash \([https\://github\.com/ansible\-collections/community\.general/issues/10127](https\://github\.com/ansible\-collections/community\.general/issues/10127)\, [https\://github\.com/ansible\-collections/community\.general/pull/10128](https\://github\.com/ansible\-collections/community\.general/pull/10128)\)\.
-* keycloak\_user\_rolemapping \- fix \-\-diff
mode \([https\://github\.com/ansible\-collections/community\.general/issues/10067](https\://github\.com/ansible\-collections/community\.general/issues/10067)\, [https\://github\.com/ansible\-collections/community\.general/pull/10075](https\://github\.com/ansible\-collections/community\.general/pull/10075)\)\.
-* pickle cache plugin \- avoid extra JSON serialization with ansible\-core \>\= 2\.19 \([https\://github\.com/ansible\-collections/community\.general/pull/10136](https\://github\.com/ansible\-collections/community\.general/pull/10136)\)\.
-* rundeck\_acl\_policy \- ensure that project ACLs are sent to the correct endpoint \([https\://github\.com/ansible\-collections/community\.general/pull/10097](https\://github\.com/ansible\-collections/community\.general/pull/10097)\)\.
-* sysrc \- split the output of sysrc \-e \-a
on the first \=
only \([https\://github\.com/ansible\-collections/community\.general/issues/10120](https\://github\.com/ansible\-collections/community\.general/issues/10120)\, [https\://github\.com/ansible\-collections/community\.general/pull/10121](https\://github\.com/ansible\-collections/community\.general/pull/10121)\)\.
-
-
-## v9\.5\.7
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Minor Changes
-
-* apache2\_module \- added workaround for new PHP module name\, from php7\_module
to php\_module
\([https\://github\.com/ansible\-collections/community\.general/pull/9951](https\://github\.com/ansible\-collections/community\.general/pull/9951)\)\.
-
-
-### Bugfixes
-
-* dependent look plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* diy callback plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* github\_deploy\_key \- check that key really exists on 422 to avoid masking other errors \([https\://github\.com/ansible\-collections/community\.general/issues/6718](https\://github\.com/ansible\-collections/community\.general/issues/6718)\, [https\://github\.com/ansible\-collections/community\.general/pull/10011](https\://github\.com/ansible\-collections/community\.general/pull/10011)\)\.
-* hashids and unicode\_normalize filter plugins \- avoid deprecated AnsibleFilterTypeError
on ansible\-core 2\.19 \([https\://github\.com/ansible\-collections/community\.general/pull/9992](https\://github\.com/ansible\-collections/community\.general/pull/9992)\)\.
-* keycloak\_authentication \- fix authentification config duplication for Keycloak \< 26\.2\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9987](https\://github\.com/ansible\-collections/community\.general/pull/9987)\)\.
-* keycloak\_client \- fix the idempotency regression by normalizing the Keycloak response for after\_client
\([https\://github\.com/ansible\-collections/community\.general/issues/9905](https\://github\.com/ansible\-collections/community\.general/issues/9905)\, [https\://github\.com/ansible\-collections/community\.general/pull/9976](https\://github\.com/ansible\-collections/community\.general/pull/9976)\)\.
-* proxmox inventory plugin \- fix ansible\_host
staying empty for certain Proxmox nodes \([https\://github\.com/ansible\-collections/community\.general/issues/5906](https\://github\.com/ansible\-collections/community\.general/issues/5906)\, [https\://github\.com/ansible\-collections/community\.general/pull/9952](https\://github\.com/ansible\-collections/community\.general/pull/9952)\)\.
-* proxmox\_disk \- fail gracefully if storage
is required but not provided by the user \([https\://github\.com/ansible\-collections/community\.general/issues/9941](https\://github\.com/ansible\-collections/community\.general/issues/9941)\, [https\://github\.com/ansible\-collections/community\.general/pull/9963](https\://github\.com/ansible\-collections/community\.general/pull/9963)\)\.
-* reveal\_ansible\_type filter plugin and ansible\_type test plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* sysrc \- no longer always reporting changed\=true
when state\=absent
\. This fixes the method exists\(\)
\([https\://github\.com/ansible\-collections/community\.general/issues/10004](https\://github\.com/ansible\-collections/community\.general/issues/10004)\, [https\://github\.com/ansible\-collections/community\.general/pull/10005](https\://github\.com/ansible\-collections/community\.general/pull/10005)\)\.
-* yaml callback plugin \- use ansible\-core internals to avoid breakage with Data Tagging \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-
-
-### Known Issues
-
-* reveal\_ansible\_type filter plugin and ansible\_type test plugin \- note that ansible\-core\'s Data Tagging feature implements new aliases\, such as \_AnsibleTaggedStr
for str
\, \_AnsibleTaggedInt
for int
\, and \_AnsibleTaggedFloat
for float
\([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-
-
-## v9\.5\.6
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Minor Changes
-
-* consul\_token \- fix idempotency when policies
or roles
are supplied by name \([https\://github\.com/ansible\-collections/community\.general/issues/9841](https\://github\.com/ansible\-collections/community\.general/issues/9841)\, [https\://github\.com/ansible\-collections/community\.general/pull/9845](https\://github\.com/ansible\-collections/community\.general/pull/9845)\)\.
-
-
-### Bugfixes
-
-* cloudlare\_dns \- handle exhausted response stream in case of HTTP errors to show nice error message to the user \([https\://github\.com/ansible\-collections/community\.general/issues/9782](https\://github\.com/ansible\-collections/community\.general/issues/9782)\, [https\://github\.com/ansible\-collections/community\.general/pull/9818](https\://github\.com/ansible\-collections/community\.general/pull/9818)\)\.
-* dnf\_versionlock \- add support for dnf5 \([https\://github\.com/ansible\-collections/community\.general/issues/9556](https\://github\.com/ansible\-collections/community\.general/issues/9556)\)\.
-* homebrew\_cask \- handle unusual brew version strings \([https\://github\.com/ansible\-collections/community\.general/issues/8432](https\://github\.com/ansible\-collections/community\.general/issues/8432)\, [https\://github\.com/ansible\-collections/community\.general/pull/9881](https\://github\.com/ansible\-collections/community\.general/pull/9881)\)\.
-* ipa\_host \- module revoked existing host certificates even if user\_certificate
was not given \([https\://github\.com/ansible\-collections/community\.general/pull/9694](https\://github\.com/ansible\-collections/community\.general/pull/9694)\)\.
-* nmcli \- enable changing only the order of DNS servers or search suffixes \([https\://github\.com/ansible\-collections/community\.general/issues/8724](https\://github\.com/ansible\-collections/community\.general/issues/8724)\, [https\://github\.com/ansible\-collections/community\.general/pull/9880](https\://github\.com/ansible\-collections/community\.general/pull/9880)\)\.
-* proxmox\_vm\_info \- the module no longer expects that the key template
exists in a dictionary returned by Proxmox \([https\://github\.com/ansible\-collections/community\.general/issues/9875](https\://github\.com/ansible\-collections/community\.general/issues/9875)\, [https\://github\.com/ansible\-collections/community\.general/pull/9910](https\://github\.com/ansible\-collections/community\.general/pull/9910)\)\.
-* sudoers \- display stdout and stderr raised while failed validation \([https\://github\.com/ansible\-collections/community\.general/issues/9674](https\://github\.com/ansible\-collections/community\.general/issues/9674)\, [https\://github\.com/ansible\-collections/community\.general/pull/9871](https\://github\.com/ansible\-collections/community\.general/pull/9871)\)\.
-
-
-## v9\.5\.5
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Bugfixes
-
-* apache2\_mod\_proxy \- make compatible with Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9762](https\://github\.com/ansible\-collections/community\.general/pull/9762)\)\.
-* apache2\_mod\_proxy \- passing the cluster\'s page as referer for the member\'s pages\. This makes the module actually work again for halfway modern Apache versions\. According to some comments founds on the net the referer was required since at least 2019 for some versions of Apache 2 \([https\://github\.com/ansible\-collections/community\.general/pull/9762](https\://github\.com/ansible\-collections/community\.general/pull/9762)\)\.
-* cloudflare\_dns \- fix crash when deleting a DNS record or when updating a record with solo\=true
\([https\://github\.com/ansible\-collections/community\.general/issues/9652](https\://github\.com/ansible\-collections/community\.general/issues/9652)\, [https\://github\.com/ansible\-collections/community\.general/pull/9649](https\://github\.com/ansible\-collections/community\.general/pull/9649)\)\.
-* elasticsearch\_plugin \- fix ERROR\: D is not a recognized option
issue when configuring proxy settings \([https\://github\.com/ansible\-collections/community\.general/pull/9774](https\://github\.com/ansible\-collections/community\.general/pull/9774)\, [https\://github\.com/ansible\-collections/community\.general/issues/9773](https\://github\.com/ansible\-collections/community\.general/issues/9773)\)\.
-* keycloak\_client \- fix and improve existing tests\. The module showed a diff without actual changes\, solved by improving the normalise\_cr\(\)
function \([https\://github\.com/ansible\-collections/community\.general/pull/9644](https\://github\.com/ansible\-collections/community\.general/pull/9644)\)\.
-* keycloak\_client \- in check mode\, detect whether the lists in before client \(for example redirect URI list\) contain items that the lists in the desired client do not contain \([https\://github\.com/ansible\-collections/community\.general/pull/9739](https\://github\.com/ansible\-collections/community\.general/pull/9739)\)\.
-* passwordstore lookup plugin \- fix subkey creation even when create\=false
\([https\://github\.com/ansible\-collections/community\.general/issues/9105](https\://github\.com/ansible\-collections/community\.general/issues/9105)\, [https\://github\.com/ansible\-collections/community\.general/pull/9106](https\://github\.com/ansible\-collections/community\.general/pull/9106)\)\.
-* proxmox inventory plugin \- plugin did not update cache correctly after meta\: refresh\_inventory
\([https\://github\.com/ansible\-collections/community\.general/issues/9710](https\://github\.com/ansible\-collections/community\.general/issues/9710)\, [https\://github\.com/ansible\-collections/community\.general/pull/9760](https\://github\.com/ansible\-collections/community\.general/pull/9760)\)\.
-* redhat\_subscription \- use the \"enable\_content\" option \(when available\) when
- registering using D\-Bus\, to ensure that subscription\-manager enables the
- content on registration\; this is particularly important on EL 10\+ and Fedora
- 41\+
- \([https\://github\.com/ansible\-collections/community\.general/pull/9778](https\://github\.com/ansible\-collections/community\.general/pull/9778)\)\.
-* xml \- ensure file descriptor is closed \([https\://github\.com/ansible\-collections/community\.general/pull/9695](https\://github\.com/ansible\-collections/community\.general/pull/9695)\)\.
-
-
-## v9\.5\.4
-
-
-### Security Fixes
-
-* keycloak\_client \- Sanitize saml\.encryption\.private\.key
so it does not show in the logs \([https\://github\.com/ansible\-collections/community\.general/pull/9621](https\://github\.com/ansible\-collections/community\.general/pull/9621)\)\.
-
-
-### Bugfixes
-
-* redhat\_subscription \- do not try to unsubscribe \(i\.e\. remove subscriptions\)
- when unregistering a system\: newer versions of subscription\-manager\, as
- available in EL 10 and Fedora 41\+\, do not support entitlements anymore\, and
- thus unsubscribing will fail
- \([https\://github\.com/ansible\-collections/community\.general/pull/9578](https\://github\.com/ansible\-collections/community\.general/pull/9578)\)\.
-
-
-## v9\.5\.3
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Minor Changes
-
-* proxmox module utils \- add method api\_task\_complete
that can wait for task completion and return error message \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\.
-
-
-### Security Fixes
-
-* keycloak\_authentication \- API calls did not properly set the priority
during update resulting in incorrectly sorted authentication flows\. This apparently only affects Keycloak 25 or newer \([https\://github\.com/ansible\-collections/community\.general/pull/9263](https\://github\.com/ansible\-collections/community\.general/pull/9263)\)\.
-
-
-### Bugfixes
-
-* dig lookup plugin \- correctly handle NoNameserver
exception \([https\://github\.com/ansible\-collections/community\.general/pull/9363](https\://github\.com/ansible\-collections/community\.general/pull/9363)\, [https\://github\.com/ansible\-collections/community\.general/issues/9362](https\://github\.com/ansible\-collections/community\.general/issues/9362)\)\.
-* htpasswd \- report changes when file permissions are adjusted \([https\://github\.com/ansible\-collections/community\.general/issues/9485](https\://github\.com/ansible\-collections/community\.general/issues/9485)\, [https\://github\.com/ansible\-collections/community\.general/pull/9490](https\://github\.com/ansible\-collections/community\.general/pull/9490)\)\.
-* proxmox\_disk \- fix async method and make resize\_disk
method handle errors correctly \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\.
-* proxmox\_template \- fix the wrong path called on proxmox\_template\.task\_status
\([https\://github\.com/ansible\-collections/community\.general/issues/9276](https\://github\.com/ansible\-collections/community\.general/issues/9276)\, [https\://github\.com/ansible\-collections/community\.general/pull/9277](https\://github\.com/ansible\-collections/community\.general/pull/9277)\)\.
-* qubes connection plugin \- fix the printing of debug information \([https\://github\.com/ansible\-collections/community\.general/pull/9334](https\://github\.com/ansible\-collections/community\.general/pull/9334)\)\.
-* redfish\_utils module utils \- Fix VerifyBiosAttributes
command on multi system resource nodes \([https\://github\.com/ansible\-collections/community\.general/pull/9234](https\://github\.com/ansible\-collections/community\.general/pull/9234)\)\.
-
-
-## v9\.5\.2
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Minor Changes
-
-* proxmox inventory plugin \- fix urllib3 InsecureRequestWarnings
not being suppressed when a token is used \([https\://github\.com/ansible\-collections/community\.general/pull/9099](https\://github\.com/ansible\-collections/community\.general/pull/9099)\)\.
-
-
-### Bugfixes
-
-* dnf\_config\_manager \- fix hanging when prompting to import GPG keys \([https\://github\.com/ansible\-collections/community\.general/pull/9124](https\://github\.com/ansible\-collections/community\.general/pull/9124)\, [https\://github\.com/ansible\-collections/community\.general/issues/8830](https\://github\.com/ansible\-collections/community\.general/issues/8830)\)\.
-* dnf\_config\_manager \- forces locale to C
before module starts\. If the locale was set to non\-English\, the output of the dnf config\-manager
could not be parsed \([https\://github\.com/ansible\-collections/community\.general/pull/9157](https\://github\.com/ansible\-collections/community\.general/pull/9157)\, [https\://github\.com/ansible\-collections/community\.general/issues/9046](https\://github\.com/ansible\-collections/community\.general/issues/9046)\)\.
-* flatpak \- force the locale language to C
when running the flatpak command \([https\://github\.com/ansible\-collections/community\.general/pull/9187](https\://github\.com/ansible\-collections/community\.general/pull/9187)\, [https\://github\.com/ansible\-collections/community\.general/issues/8883](https\://github\.com/ansible\-collections/community\.general/issues/8883)\)\.
-* github\_key \- in check mode\, a faulty call to \`datetime\.strftime\(\.\.\.\)\`
was being made which generated an exception \([https\://github\.com/ansible\-collections/community\.general/issues/9185](https\://github\.com/ansible\-collections/community\.general/issues/9185)\)\.
-* homebrew\_cask \- allow \+
symbol in Homebrew cask name validation regex \([https\://github\.com/ansible\-collections/community\.general/pull/9128](https\://github\.com/ansible\-collections/community\.general/pull/9128)\)\.
-* keycloak\_client \- fix diff by removing code that turns the attributes dict which contains additional settings into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9077](https\://github\.com/ansible\-collections/community\.general/pull/9077)\)\.
-* keycloak\_clientscope \- fix diff and end\_state
by removing the code that turns the attributes dict\, which contains additional config items\, into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9082](https\://github\.com/ansible\-collections/community\.general/pull/9082)\)\.
-* keycloak\_clientscope\_type \- sort the default and optional clientscope lists to improve the diff \([https\://github\.com/ansible\-collections/community\.general/pull/9202](https\://github\.com/ansible\-collections/community\.general/pull/9202)\)\.
-* redfish\_utils module utils \- remove undocumented default applytime \([https\://github\.com/ansible\-collections/community\.general/pull/9114](https\://github\.com/ansible\-collections/community\.general/pull/9114)\)\.
-* slack \- fail if Slack API response is not OK with error message \([https\://github\.com/ansible\-collections/community\.general/pull/9198](https\://github\.com/ansible\-collections/community\.general/pull/9198)\)\.
-
-
-## v9\.5\.1
-
-
-### Release Summary
-
-Regular bugfix release\.
-
-
-### Minor Changes
-
-* redfish\_utils module utils \- schedule a BIOS configuration job at next reboot when the BIOS config is changed \([https\://github\.com/ansible\-collections/community\.general/pull/9012](https\://github\.com/ansible\-collections/community\.general/pull/9012)\)\.
-
-
-### Bugfixes
-
-* bitwarden lookup plugin \- support BWS v0\.3\.0 syntax breaking change \([https\://github\.com/ansible\-collections/community\.general/pull/9028](https\://github\.com/ansible\-collections/community\.general/pull/9028)\)\.
-* collection\_version lookup plugin \- use importlib
directly instead of the deprecated and in ansible\-core 2\.19 removed ansible\.module\_utils\.compat\.importlib
\([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-* gitlab\_label \- update label\'s color \([https\://github\.com/ansible\-collections/community\.general/pull/9010](https\://github\.com/ansible\-collections/community\.general/pull/9010)\)\.
-* keycloak\_clientscope\_type \- fix detect changes in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/9092](https\://github\.com/ansible\-collections/community\.general/issues/9092)\, [https\://github\.com/ansible\-collections/community\.general/pull/9093](https\://github\.com/ansible\-collections/community\.general/pull/9093)\)\.
-* keycloak\_group \- fix crash caused in subgroup creation\. The crash was caused by a missing or empty subGroups
property in Keycloak ≥23 \([https\://github\.com/ansible\-collections/community\.general/issues/8788](https\://github\.com/ansible\-collections/community\.general/issues/8788)\, [https\://github\.com/ansible\-collections/community\.general/pull/8979](https\://github\.com/ansible\-collections/community\.general/pull/8979)\)\.
-* modprobe \- fix check mode not being honored for persistent
option \([https\://github\.com/ansible\-collections/community\.general/issues/9051](https\://github\.com/ansible\-collections/community\.general/issues/9051)\, [https\://github\.com/ansible\-collections/community\.general/pull/9052](https\://github\.com/ansible\-collections/community\.general/pull/9052)\)\.
-* one\_host \- fix if statements for cases when ID\=0
\([https\://github\.com/ansible\-collections/community\.general/issues/1199](https\://github\.com/ansible\-collections/community\.general/issues/1199)\, [https\://github\.com/ansible\-collections/community\.general/pull/8907](https\://github\.com/ansible\-collections/community\.general/pull/8907)\)\.
-* one\_image \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
-* one\_image\_info \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
-* one\_vnet \- fix module failing due to a variable typo \([https\://github\.com/ansible\-collections/community\.general/pull/9019](https\://github\.com/ansible\-collections/community\.general/pull/9019)\)\.
-* redfish\_utils module utils \- fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers \([https\://github\.com/ansible\-collections/community\.general/issues/9047](https\://github\.com/ansible\-collections/community\.general/issues/9047)\, [https\://github\.com/ansible\-collections/community\.general/pull/9057](https\://github\.com/ansible\-collections/community\.general/pull/9057)\)\.
-
-
-## v9\.5\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-Please note that this is the last feature release for community\.general 9\.x\.y\.
-From now on\, new features will only go into community\.general 10\.x\.y\.
-
-
-### Minor Changes
-
-* dig lookup plugin \- add port
option to specify DNS server port \([https\://github\.com/ansible\-collections/community\.general/pull/8966](https\://github\.com/ansible\-collections/community\.general/pull/8966)\)\.
-* flatpak \- improve the parsing of Flatpak application IDs based on official guidelines \([https\://github\.com/ansible\-collections/community\.general/pull/8909](https\://github\.com/ansible\-collections/community\.general/pull/8909)\)\.
-* gio\_mime \- adjust code ahead of the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8855](https\://github\.com/ansible\-collections/community\.general/pull/8855)\)\.
-* gitlab\_deploy\_key \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_group \- add many new parameters \([https\://github\.com/ansible\-collections/community\.general/pull/8908](https\://github\.com/ansible\-collections/community\.general/pull/8908)\)\.
-* gitlab\_group \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_issue \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_merge\_request \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_runner \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* icinga2\_host \- replace loop with dict comprehension \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* jira \- adjust code ahead of the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8856](https\://github\.com/ansible\-collections/community\.general/pull/8856)\)\.
-* keycloak\_client \- add client\-x509
choice to client\_authenticator\_type
\([https\://github\.com/ansible\-collections/community\.general/pull/8973](https\://github\.com/ansible\-collections/community\.general/pull/8973)\)\.
-* keycloak\_user\_federation \- add the user federation config parameter referral
to the module arguments \([https\://github\.com/ansible\-collections/community\.general/pull/8954](https\://github\.com/ansible\-collections/community\.general/pull/8954)\)\.
-* memset\_dns\_reload \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_memstore\_info \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_server\_info \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone\_domain \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone\_record \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* nmcli \- add conn\_enable
param to reload connection \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\.
-* nmcli \- add state\=up
and state\=down
to enable/disable connections \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/issues/7152](https\://github\.com/ansible\-collections/community\.general/issues/7152)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\.
-* nmcli \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* npm \- add force
parameter to allow \-\-force
\([https\://github\.com/ansible\-collections/community\.general/pull/8885](https\://github\.com/ansible\-collections/community\.general/pull/8885)\)\.
-* one\_image \- add option persistent
to manage image persistence \([https\://github\.com/ansible\-collections/community\.general/issues/3578](https\://github\.com/ansible\-collections/community\.general/issues/3578)\, [https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image \- refactor code to make it more similar to one\_template
and one\_vnet
\([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image\_info \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image\_info \- refactor code to make it more similar to one\_template
and one\_vnet
\([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* open\_iscsi \- allow login to a portal with multiple targets without specifying any of them \([https\://github\.com/ansible\-collections/community\.general/pull/8719](https\://github\.com/ansible\-collections/community\.general/pull/8719)\)\.
-* opennebula\.py \- add VM id
and VM host
to inventory host data \([https\://github\.com/ansible\-collections/community\.general/pull/8532](https\://github\.com/ansible\-collections/community\.general/pull/8532)\)\.
-* passwordstore lookup plugin \- add subkey creation/update support \([https\://github\.com/ansible\-collections/community\.general/pull/8952](https\://github\.com/ansible\-collections/community\.general/pull/8952)\)\.
-* proxmox inventory plugin \- clean up authentication code \([https\://github\.com/ansible\-collections/community\.general/pull/8917](https\://github\.com/ansible\-collections/community\.general/pull/8917)\)\.
-* redfish\_command \- add handling of the PasswordChangeRequired
message from services in the UpdateUserPassword
command to directly modify the user\'s password if the requested user is the one invoking the operation \([https\://github\.com/ansible\-collections/community\.general/issues/8652](https\://github\.com/ansible\-collections/community\.general/issues/8652)\, [https\://github\.com/ansible\-collections/community\.general/pull/8653](https\://github\.com/ansible\-collections/community\.general/pull/8653)\)\.
-* redfish\_config \- remove CapacityBytes
from required parameters of the CreateVolume
command \([https\://github\.com/ansible\-collections/community\.general/pull/8956](https\://github\.com/ansible\-collections/community\.general/pull/8956)\)\.
-* redfish\_config \- add parameter storage\_none\_volume\_deletion
to CreateVolume
command in order to control the automatic deletion of non\-RAID volumes \([https\://github\.com/ansible\-collections/community\.general/pull/8990](https\://github\.com/ansible\-collections/community\.general/pull/8990)\)\.
-* redfish\_info \- adds RedfishURI
and StorageId
to Disk inventory \([https\://github\.com/ansible\-collections/community\.general/pull/8937](https\://github\.com/ansible\-collections/community\.general/pull/8937)\)\.
-* scaleway\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_registry \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_registry\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_user\_data \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* udm\_dns\_record \- replace loop with dict\.update\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-
-
-### Deprecated Features
-
-* hipchat \- the hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020\. The module is therefore deprecated and will be removed from community\.general 11\.0\.0 if nobody provides compelling reasons to still keep it \([https\://github\.com/ansible\-collections/community\.general/pull/8919](https\://github\.com/ansible\-collections/community\.general/pull/8919)\)\.
-
-
-### Bugfixes
-
-* cloudflare\_dns \- fix changing Cloudflare SRV records \([https\://github\.com/ansible\-collections/community\.general/issues/8679](https\://github\.com/ansible\-collections/community\.general/issues/8679)\, [https\://github\.com/ansible\-collections/community\.general/pull/8948](https\://github\.com/ansible\-collections/community\.general/pull/8948)\)\.
-* cmd\_runner module utils \- call to get\_best\_parsable\_locales\(\)
was missing parameter \([https\://github\.com/ansible\-collections/community\.general/pull/8929](https\://github\.com/ansible\-collections/community\.general/pull/8929)\)\.
-* dig lookup plugin \- fix using only the last nameserver specified \([https\://github\.com/ansible\-collections/community\.general/pull/8970](https\://github\.com/ansible\-collections/community\.general/pull/8970)\)\.
-* django\_command \- option command
is now split lexically before passed to underlying PythonRunner \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\.
-* homectl \- the module now tries to use legacycrypt
on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4691](https\://github\.com/ansible\-collections/community\.general/issues/4691)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\.
-* ini\_file \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* ipa\_host \- add force\_create
\, fix enabled
and disabled
states \([https\://github\.com/ansible\-collections/community\.general/issues/1094](https\://github\.com/ansible\-collections/community\.general/issues/1094)\, [https\://github\.com/ansible\-collections/community\.general/pull/8920](https\://github\.com/ansible\-collections/community\.general/pull/8920)\)\.
-* ipa\_hostgroup \- fix enabled \`\` and \`\`disabled
states \([https\://github\.com/ansible\-collections/community\.general/issues/8408](https\://github\.com/ansible\-collections/community\.general/issues/8408)\, [https\://github\.com/ansible\-collections/community\.general/pull/8900](https\://github\.com/ansible\-collections/community\.general/pull/8900)\)\.
-* java\_keystore \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* jenkins\_plugin \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* kdeconfig \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* keycloak\_realm \- fix change detection in check mode by sorting the lists in the realms beforehand \([https\://github\.com/ansible\-collections/community\.general/pull/8877](https\://github\.com/ansible\-collections/community\.general/pull/8877)\)\.
-* keycloak\_user\_federation \- add module argument allowing users to configure the update mode for the parameter bindCredential
\([https\://github\.com/ansible\-collections/community\.general/pull/8898](https\://github\.com/ansible\-collections/community\.general/pull/8898)\)\.
-* keycloak\_user\_federation \- minimize change detection by setting krbPrincipalAttribute
to \'\'
in Keycloak responses if missing \([https\://github\.com/ansible\-collections/community\.general/pull/8785](https\://github\.com/ansible\-collections/community\.general/pull/8785)\)\.
-* keycloak\_user\_federation \- remove lastSync
parameter from Keycloak responses to minimize diff/changes \([https\://github\.com/ansible\-collections/community\.general/pull/8812](https\://github\.com/ansible\-collections/community\.general/pull/8812)\)\.
-* keycloak\_userprofile \- fix empty response when fetching userprofile component by removing parent\=parent\_id
filter \([https\://github\.com/ansible\-collections/community\.general/pull/8923](https\://github\.com/ansible\-collections/community\.general/pull/8923)\)\.
-* keycloak\_userprofile \- improve diff by deserializing the fetched kc\.user\.profile\.config
and serialize it only when sending back \([https\://github\.com/ansible\-collections/community\.general/pull/8940](https\://github\.com/ansible\-collections/community\.general/pull/8940)\)\.
-* lxd\_container \- fix bug introduced in previous commit \([https\://github\.com/ansible\-collections/community\.general/pull/8895](https\://github\.com/ansible\-collections/community\.general/pull/8895)\, [https\://github\.com/ansible\-collections/community\.general/issues/8888](https\://github\.com/ansible\-collections/community\.general/issues/8888)\)\.
-* one\_service \- fix service creation after it was deleted with unique
parameter \([https\://github\.com/ansible\-collections/community\.general/issues/3137](https\://github\.com/ansible\-collections/community\.general/issues/3137)\, [https\://github\.com/ansible\-collections/community\.general/pull/8887](https\://github\.com/ansible\-collections/community\.general/pull/8887)\)\.
-* pam\_limits \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* python\_runner module utils \- parameter path\_prefix
was being handled as string when it should be a list \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\.
-* udm\_user \- the module now tries to use legacycrypt
on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\.
-
-
-### New Modules
-
-* community\.general\.ipa\_getkeytab \- Manage keytab file in FreeIPA\.
-
-
-## v9\.4\.0
-
-
-### Release Summary
-
-Bugfix and feature release\.
-
-
-### Minor Changes
-
-* MH module utils \- add parameter when
to cause\_changes
decorator \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\.
-* MH module utils \- minor refactor in decorators \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\.
-* alternatives \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* consul\_acl \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* copr \- Added includepkgs
and excludepkgs
parameters to limit the list of packages fetched or excluded from the repository\([https\://github\.com/ansible\-collections/community\.general/pull/8779](https\://github\.com/ansible\-collections/community\.general/pull/8779)\)\.
-* credstash lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* csv module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* deco MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* etcd3 \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* gio\_mime \- mute the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
-* gitlab\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* gitlab\_project \- add option issues\_access\_level
to enable/disable project issues \([https\://github\.com/ansible\-collections/community\.general/pull/8760](https\://github\.com/ansible\-collections/community\.general/pull/8760)\)\.
-* gitlab\_project \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* gitlab\_project \- sorted parameters in order to avoid future merge conflicts \([https\://github\.com/ansible\-collections/community\.general/pull/8759](https\://github\.com/ansible\-collections/community\.general/pull/8759)\)\.
-* hashids filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* hwc\_ecs\_instance \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_evs\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_eip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_peering\_connect \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_port \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_subnet \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* imc\_rest \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* ipa\_otptoken \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* jira \- mute the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
-* jira \- replace deprecated params when using decorator cause\_changes
\([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\.
-* keep\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* keycloak\_client \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_clientscope \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_identity\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_user\_federation \- add module argument allowing users to optout of the removal of unspecified mappers\, for example to keep the keycloak default mappers \([https\://github\.com/ansible\-collections/community\.general/pull/8764](https\://github\.com/ansible\-collections/community\.general/pull/8764)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* linode \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* lxc\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* lxd\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* manageiq\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* ocapi\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* one\_service \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* one\_vm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* onepassword lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pids \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pipx \- added new states install\_all
\, uninject
\, upgrade\_shared
\, pin
\, and unpin
\([https\://github\.com/ansible\-collections/community\.general/pull/8809](https\://github\.com/ansible\-collections/community\.general/pull/8809)\)\.
-* pipx \- added parameter global
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pipx\_info \- added parameter global
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pkg5\_publisher \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* proxmox \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* redfish\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* redfish\_utils module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* redis cache plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* remove\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* replace\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* scaleway \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* scaleway\_compute \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_ip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_lb \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_user\_data \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* sensu\_silence \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* snmp\_facts \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* sorcery \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* ufw \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* unsafe plugin utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* vardict module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* vars MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* vmadm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-
-
-### Deprecated Features
-
-* MH decorator cause\_changes module utils \- deprecate parameters on\_success
and on\_failure
\([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\.
-* pipx \- support for versions of the command line tool pipx
older than 1\.7\.0
is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx\_info \- support for versions of the command line tool pipx
older than 1\.7\.0
is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-
-
-### Bugfixes
-
-* gitlab\_group\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\.
-* gitlab\_project \- fix container\_expiration\_policy
not being applied when creating a new project \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\.
-* gitlab\_project \- fix crash caused by old Gitlab projects not having a container\_expiration\_policy
attribute \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\.
-* gitlab\_project\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\.
-* keycloak\_realm\_key \- fix invalid usage of parent\_id
\([https\://github\.com/ansible\-collections/community\.general/issues/7850](https\://github\.com/ansible\-collections/community\.general/issues/7850)\, [https\://github\.com/ansible\-collections/community\.general/pull/8823](https\://github\.com/ansible\-collections/community\.general/pull/8823)\)\.
-* keycloak\_user\_federation \- fix key error when removing mappers during an update and new mappers are specified in the module args \([https\://github\.com/ansible\-collections/community\.general/pull/8762](https\://github\.com/ansible\-collections/community\.general/pull/8762)\)\.
-* keycloak\_user\_federation \- fix the UnboundLocalError
that occurs when an ID is provided for a user federation mapper \([https\://github\.com/ansible\-collections/community\.general/pull/8831](https\://github\.com/ansible\-collections/community\.general/pull/8831)\)\.
-* keycloak\_user\_federation \- sort desired and after mapper list by name \(analog to before mapper list\) to minimize diff and make change detection more accurate \([https\://github\.com/ansible\-collections/community\.general/pull/8761](https\://github\.com/ansible\-collections/community\.general/pull/8761)\)\.
-* proxmox inventory plugin \- fixed a possible error on concatenating responses from proxmox\. In case an API call unexpectedly returned an empty result\, the inventory failed with a fatal error\. Added check for empty response \([https\://github\.com/ansible\-collections/community\.general/issues/8798](https\://github\.com/ansible\-collections/community\.general/issues/8798)\, [https\://github\.com/ansible\-collections/community\.general/pull/8794](https\://github\.com/ansible\-collections/community\.general/pull/8794)\)\.
-
-
-### New Modules
-
-* community\.general\.keycloak\_userprofile \- Allows managing Keycloak User Profiles\.
-* community\.general\.one\_vnet \- Manages OpenNebula virtual networks\.
-
-
-## v9\.3\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* cgroup\_memory\_recap\, hipchat\, jabber\, log\_plays\, loganalytics\, logentries\, logstash\, slack\, splunk\, sumologic\, syslog\_json callback plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8628](https\://github\.com/ansible\-collections/community\.general/pull/8628)\)\.
-* chef\_databag\, consul\_kv\, cyberarkpassword\, dsv\, etcd\, filetree\, hiera\, onepassword\, onepassword\_doc\, onepassword\_raw\, passwordstore\, redis\, shelvefile\, tss lookup plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8626](https\://github\.com/ansible\-collections/community\.general/pull/8626)\)\.
-* chroot\, funcd\, incus\, iocage\, jail\, lxc\, lxd\, qubes\, zone connection plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8627](https\://github\.com/ansible\-collections/community\.general/pull/8627)\)\.
-* cobbler\, linode\, lxd\, nmap\, online\, scaleway\, stackpath\_compute\, virtualbox inventory plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8625](https\://github\.com/ansible\-collections/community\.general/pull/8625)\)\.
-* doas\, dzdo\, ksu\, machinectl\, pbrun\, pfexec\, pmrun\, sesu\, sudosu become plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8623](https\://github\.com/ansible\-collections/community\.general/pull/8623)\)\.
-* gconftool2 \- make use of ModuleHelper
features to simplify code \([https\://github\.com/ansible\-collections/community\.general/pull/8711](https\://github\.com/ansible\-collections/community\.general/pull/8711)\)\.
-* gitlab\_project \- add option container\_expiration\_policy
to schedule container registry cleanup \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\.
-* gitlab\_project \- add option model\_registry\_access\_level
to disable model registry \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* gitlab\_project \- add option pages\_access\_level
to disable project pages \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* gitlab\_project \- add option repository\_access\_level
to disable project repository \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\.
-* gitlab\_project \- add option service\_desk\_enabled
to disable service desk \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* locale\_gen \- add support for multiple locales \([https\://github\.com/ansible\-collections/community\.general/issues/8677](https\://github\.com/ansible\-collections/community\.general/issues/8677)\, [https\://github\.com/ansible\-collections/community\.general/pull/8682](https\://github\.com/ansible\-collections/community\.general/pull/8682)\)\.
-* memcached\, pickle\, redis\, yaml cache plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8624](https\://github\.com/ansible\-collections/community\.general/pull/8624)\)\.
-* opentelemetry callback plugin \- fix default value for store\_spans\_in\_file
causing traces to be produced to a file named None
\([https\://github\.com/ansible\-collections/community\.general/issues/8566](https\://github\.com/ansible\-collections/community\.general/issues/8566)\, [https\://github\.com/ansible\-collections/community\.general/pull/8741](https\://github\.com/ansible\-collections/community\.general/pull/8741)\)\.
-* passwordstore lookup plugin \- add the current user to the lockfile file name to address issues on multi\-user systems \([https\://github\.com/ansible\-collections/community\.general/pull/8689](https\://github\.com/ansible\-collections/community\.general/pull/8689)\)\.
-* pipx \- add parameter suffix
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8675](https\://github\.com/ansible\-collections/community\.general/pull/8675)\, [https\://github\.com/ansible\-collections/community\.general/issues/8656](https\://github\.com/ansible\-collections/community\.general/issues/8656)\)\.
-* pkgng \- add option use\_globs
\(default true
\) to optionally disable glob patterns \([https\://github\.com/ansible\-collections/community\.general/issues/8632](https\://github\.com/ansible\-collections/community\.general/issues/8632)\, [https\://github\.com/ansible\-collections/community\.general/pull/8633](https\://github\.com/ansible\-collections/community\.general/pull/8633)\)\.
-* proxmox inventory plugin \- add new fact for LXC interface details \([https\://github\.com/ansible\-collections/community\.general/pull/8713](https\://github\.com/ansible\-collections/community\.general/pull/8713)\)\.
-* redis\, redis\_info \- add client\_cert
and client\_key
options to specify path to certificate for Redis authentication \([https\://github\.com/ansible\-collections/community\.general/pull/8654](https\://github\.com/ansible\-collections/community\.general/pull/8654)\)\.
-
-
-### Bugfixes
-
-* gitlab\_runner \- fix paused
parameter being ignored \([https\://github\.com/ansible\-collections/community\.general/pull/8648](https\://github\.com/ansible\-collections/community\.general/pull/8648)\)\.
-* homebrew\_cask \- fix upgrade\_all
returns changed
when nothing upgraded \([https\://github\.com/ansible\-collections/community\.general/issues/8707](https\://github\.com/ansible\-collections/community\.general/issues/8707)\, [https\://github\.com/ansible\-collections/community\.general/pull/8708](https\://github\.com/ansible\-collections/community\.general/pull/8708)\)\.
-* keycloak\_user\_federation \- get cleartext IDP clientSecret
from full realm info to detect changes to it \([https\://github\.com/ansible\-collections/community\.general/issues/8294](https\://github\.com/ansible\-collections/community\.general/issues/8294)\, [https\://github\.com/ansible\-collections/community\.general/pull/8735](https\://github\.com/ansible\-collections/community\.general/pull/8735)\)\.
-* keycloak\_user\_federation \- remove existing user federation mappers if they are not present in the federation configuration and will not be updated \([https\://github\.com/ansible\-collections/community\.general/issues/7169](https\://github\.com/ansible\-collections/community\.general/issues/7169)\, [https\://github\.com/ansible\-collections/community\.general/pull/8695](https\://github\.com/ansible\-collections/community\.general/pull/8695)\)\.
-* proxmox \- fixed an issue where the new volume handling incorrectly converted null
values into \"None\"
strings \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- fixed an issue where volume strings were overwritten instead of appended to in the new build\_volume\(\)
method \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- removed the forced conversion of non\-string values to strings to be consistent with the module documentation \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-
-
-### New Modules
-
-* community\.general\.bootc\_manage \- Bootc Switch and Upgrade\.
-* community\.general\.homebrew\_services \- Services manager for Homebrew\.
-* community\.general\.keycloak\_realm\_keys\_metadata\_info \- Allows obtaining Keycloak realm keys metadata via Keycloak API\.
-
-
-## v9\.2\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* CmdRunner module utils \- the parameter force\_lang
now supports the special value auto
which will automatically try and determine the best parsable locale in the system \([https\://github\.com/ansible\-collections/community\.general/pull/8517](https\://github\.com/ansible\-collections/community\.general/pull/8517)\)\.
-* proxmox \- add disk\_volume
and mount\_volumes
keys for better readability \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox \- translate the old disk
and mounts
keys to the new handling internally \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox\_template \- small refactor in logic for determining whether a template exists or not \([https\://github\.com/ansible\-collections/community\.general/pull/8516](https\://github\.com/ansible\-collections/community\.general/pull/8516)\)\.
-* redfish\_\* modules \- adds ciphers
option for custom cipher selection \([https\://github\.com/ansible\-collections/community\.general/pull/8533](https\://github\.com/ansible\-collections/community\.general/pull/8533)\)\.
-* sudosu become plugin \- added an option \(alt\_method
\) to enhance compatibility with more versions of su
\([https\://github\.com/ansible\-collections/community\.general/pull/8214](https\://github\.com/ansible\-collections/community\.general/pull/8214)\)\.
-* virtualbox inventory plugin \- expose a new parameter enable\_advanced\_group\_parsing
to change how the VirtualBox dynamic inventory parses VM groups \([https\://github\.com/ansible\-collections/community\.general/issues/8508](https\://github\.com/ansible\-collections/community\.general/issues/8508)\, [https\://github\.com/ansible\-collections/community\.general/pull/8510](https\://github\.com/ansible\-collections/community\.general/pull/8510)\)\.
-* wdc\_redfish\_command \- minor change to handle upgrade file for Redfish WD platforms \([https\://github\.com/ansible\-collections/community\.general/pull/8444](https\://github\.com/ansible\-collections/community\.general/pull/8444)\)\.
-
-
-### Bugfixes
-
-* bitwarden lookup plugin \- fix KeyError
in search\_field
\([https\://github\.com/ansible\-collections/community\.general/issues/8549](https\://github\.com/ansible\-collections/community\.general/issues/8549)\, [https\://github\.com/ansible\-collections/community\.general/pull/8557](https\://github\.com/ansible\-collections/community\.general/pull/8557)\)\.
-* keycloak\_clientscope \- remove IDs from clientscope and its protocol mappers on comparison for changed check \([https\://github\.com/ansible\-collections/community\.general/pull/8545](https\://github\.com/ansible\-collections/community\.general/pull/8545)\)\.
-* nsupdate \- fix \'index out of range\' error when changing NS records by falling back to authority section of the response \([https\://github\.com/ansible\-collections/community\.general/issues/8612](https\://github\.com/ansible\-collections/community\.general/issues/8612)\, [https\://github\.com/ansible\-collections/community\.general/pull/8614](https\://github\.com/ansible\-collections/community\.general/pull/8614)\)\.
-* proxmox \- fix idempotency on creation of mount volumes using Proxmox\' special \<storage\>\:\<size\>
syntax \([https\://github\.com/ansible\-collections/community\.general/issues/8407](https\://github\.com/ansible\-collections/community\.general/issues/8407)\, [https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* redfish\_utils module utils \- do not fail when language is not exactly \"en\" \([https\://github\.com/ansible\-collections/community\.general/pull/8613](https\://github\.com/ansible\-collections/community\.general/pull/8613)\)\.
-
-
-### New Plugins
-
-
-#### Filter
-
-* community\.general\.reveal\_ansible\_type \- Return input type\.
-
-
-#### Test
-
-* community\.general\.ansible\_type \- Validate input type\.
-
-
-## v9\.1\.0
-
-
-### Release Summary
-
-Regular feature and bugfix release\.
-
-
-### Minor Changes
-
-* CmdRunner module util \- argument formats can be specified as plain functions without calling cmd\_runner\_fmt\.as\_func\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\.
-* ansible\_galaxy\_install \- add upgrade feature \([https\://github\.com/ansible\-collections/community\.general/pull/8431](https\://github\.com/ansible\-collections/community\.general/pull/8431)\, [https\://github\.com/ansible\-collections/community\.general/issues/8351](https\://github\.com/ansible\-collections/community\.general/issues/8351)\)\.
-* cargo \- add option directory
\, which allows source directory to be specified \([https\://github\.com/ansible\-collections/community\.general/pull/8480](https\://github\.com/ansible\-collections/community\.general/pull/8480)\)\.
-* cmd\_runner module utils \- add decorator cmd\_runner\_fmt\.stack
\([https\://github\.com/ansible\-collections/community\.general/pull/8415](https\://github\.com/ansible\-collections/community\.general/pull/8415)\)\.
-* cmd\_runner\_fmt module utils \- simplify implementation of cmd\_runner\_fmt\.as\_bool\_not\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8512](https\://github\.com/ansible\-collections/community\.general/pull/8512)\)\.
-* ipa\_dnsrecord \- adds SSHFP
record type for managing SSH fingerprints in FreeIPA DNS \([https\://github\.com/ansible\-collections/community\.general/pull/8404](https\://github\.com/ansible\-collections/community\.general/pull/8404)\)\.
-* keycloak\_client \- assign auth flow by name \([https\://github\.com/ansible\-collections/community\.general/pull/8428](https\://github\.com/ansible\-collections/community\.general/pull/8428)\)\.
-* openbsd\_pkg \- adds diff support to show changes in installed package list\. This does not yet work for check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8402](https\://github\.com/ansible\-collections/community\.general/pull/8402)\)\.
-* proxmox \- allow specification of the API port when using proxmox\_\* \([https\://github\.com/ansible\-collections/community\.general/issues/8440](https\://github\.com/ansible\-collections/community\.general/issues/8440)\, [https\://github\.com/ansible\-collections/community\.general/pull/8441](https\://github\.com/ansible\-collections/community\.general/pull/8441)\)\.
-* proxmox\_vm\_info \- add network
option to retrieve current network information \([https\://github\.com/ansible\-collections/community\.general/pull/8471](https\://github\.com/ansible\-collections/community\.general/pull/8471)\)\.
-* redfish\_command \- add wait
and wait\_timeout
options to allow a user to block a command until a service is accessible after performing the requested command \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\.
-* redfish\_info \- add command CheckAvailability
to check if a service is accessible \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\.
-* redis\_info \- adds support for getting cluster info \([https\://github\.com/ansible\-collections/community\.general/pull/8464](https\://github\.com/ansible\-collections/community\.general/pull/8464)\)\.
-
-
-### Deprecated Features
-
-* CmdRunner module util \- setting the value of the ignore\_none
parameter within a CmdRunner
context is deprecated and that feature should be removed in community\.general 12\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\.
-* git\_config \- the list\_all
option has been deprecated and will be removed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info
module instead \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\.
-* git\_config \- using state\=present
without providing value
is deprecated and will be disallowed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info
module instead to read a value \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\.
-
-
-### Bugfixes
-
-* git\_config \- fix behavior of state\=absent
if value
is present \([https\://github\.com/ansible\-collections/community\.general/issues/8436](https\://github\.com/ansible\-collections/community\.general/issues/8436)\, [https\://github\.com/ansible\-collections/community\.general/pull/8452](https\://github\.com/ansible\-collections/community\.general/pull/8452)\)\.
-* keycloak\_realm \- add normalizations for attributes
and protocol\_mappers
\([https\://github\.com/ansible\-collections/community\.general/pull/8496](https\://github\.com/ansible\-collections/community\.general/pull/8496)\)\.
-* launchd \- correctly report changed status in check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\)\.
-* opennebula inventory plugin \- fix invalid reference to IP when inventory runs against NICs with no IPv4 address \([https\://github\.com/ansible\-collections/community\.general/pull/8489](https\://github\.com/ansible\-collections/community\.general/pull/8489)\)\.
-* opentelemetry callback \- do not save the JSON response when using the ansible\.builtin\.uri
module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* opentelemetry callback \- do not save the content response when using the ansible\.builtin\.slurp
module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* pacman \- do not fail if an empty list of packages has been provided and there is nothing to do \([https\://github\.com/ansible\-collections/community\.general/pull/8514](https\://github\.com/ansible\-collections/community\.general/pull/8514)\)\.
-
-
-### Known Issues
-
-* homectl \- the module does not work under Python 3\.13 or newer\, since it relies on the removed crypt
standard library module \([https\://github\.com/ansible\-collections/community\.general/issues/4691](https\://github\.com/ansible\-collections/community\.general/issues/4691)\, [https\://github\.com/ansible\-collections/community\.general/pull/8497](https\://github\.com/ansible\-collections/community\.general/pull/8497)\)\.
-* udm\_user \- the module does not work under Python 3\.13 or newer\, since it relies on the removed crypt
standard library module \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8497](https\://github\.com/ansible\-collections/community\.general/pull/8497)\)\.
-
-
-### New Plugins
-
-
-#### Filter
-
-* community\.general\.keep\_keys \- Keep specific keys from dictionaries in a list\.
-* community\.general\.remove\_keys \- Remove specific keys from dictionaries in a list\.
-* community\.general\.replace\_keys \- Replace specific keys in a list of dictionaries\.
-
-
-### New Modules
-
-* community\.general\.consul\_agent\_check \- Add\, modify\, and delete checks within a consul cluster\.
-* community\.general\.consul\_agent\_service \- Add\, modify and delete services within a consul cluster\.
-* community\.general\.django\_check \- Wrapper for C\(django\-admin check\)\.
-* community\.general\.django\_createcachetable \- Wrapper for C\(django\-admin createcachetable\)\.
-
-
-## v9\.0\.1
-
-
-### Release Summary
-
-Bugfix release for inclusion in Ansible 10\.0\.0rc1\.
-
-
-### Minor Changes
-
-* ansible\_galaxy\_install \- minor refactor in the module \([https\://github\.com/ansible\-collections/community\.general/pull/8413](https\://github\.com/ansible\-collections/community\.general/pull/8413)\)\.
-
-
-### Bugfixes
-
-* cpanm \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* django module utils \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* gconftool2\_info \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* homebrew \- do not fail when brew prints warnings \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\, [https\://github\.com/ansible\-collections/community\.general/issues/7044](https\://github\.com/ansible\-collections/community\.general/issues/7044)\)\.
-* hponcfg \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* kernel\_blacklist \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* keycloak\_client \- fix TypeError when sanitizing the saml\.signing\.private\.key
attribute in the module\'s diff or state output\. The sanitize\_cr
function expected a dict where in some cases a list might occur \([https\://github\.com/ansible\-collections/community\.general/pull/8403](https\://github\.com/ansible\-collections/community\.general/pull/8403)\)\.
-* locale\_gen \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* mksysb \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* pipx\_info \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* snap \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* snap\_alias \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-
-
-## v9\.0\.0
-
-
-### Release Summary
-
-This is release 9\.0\.0 of community\.general
\, released on 2024\-05\-20\.
-
-
-### Minor Changes
-
-* PythonRunner module utils \- specialisation of CmdRunner
to execute Python scripts \([https\://github\.com/ansible\-collections/community\.general/pull/8289](https\://github\.com/ansible\-collections/community\.general/pull/8289)\)\.
-* Use offset\-aware datetime\.datetime
objects \(with timezone UTC\) instead of offset\-naive UTC timestamps\, which are deprecated in Python 3\.12 \([https\://github\.com/ansible\-collections/community\.general/pull/8222](https\://github\.com/ansible\-collections/community\.general/pull/8222)\)\.
-* aix\_lvol \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* apt\_rpm \- add new states latest
and present\_not\_latest
\. The value latest
is equivalent to the current behavior of present
\, which will upgrade a package if a newer version exists\. present\_not\_latest
does what most users would expect present
to do\: it does not upgrade if the package is already installed\. The current behavior of present
will be deprecated in a later version\, and eventually changed to that of present\_not\_latest
\([https\://github\.com/ansible\-collections/community\.general/issues/8217](https\://github\.com/ansible\-collections/community\.general/issues/8217)\, [https\://github\.com/ansible\-collections/community\.general/pull/8247](https\://github\.com/ansible\-collections/community\.general/pull/8247)\)\.
-* apt\_rpm \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* bitwarden lookup plugin \- add bw\_session
option\, to pass session key instead of reading from env \([https\://github\.com/ansible\-collections/community\.general/pull/7994](https\://github\.com/ansible\-collections/community\.general/pull/7994)\)\.
-* bitwarden lookup plugin \- add support to filter by organization ID \([https\://github\.com/ansible\-collections/community\.general/pull/8188](https\://github\.com/ansible\-collections/community\.general/pull/8188)\)\.
-* bitwarden lookup plugin \- allows to fetch all records of a given collection ID\, by allowing to pass an empty value for search\_value
when collection\_id
is provided \([https\://github\.com/ansible\-collections/community\.general/pull/8013](https\://github\.com/ansible\-collections/community\.general/pull/8013)\)\.
-* bitwarden lookup plugin \- when looking for items using an item ID\, the item is now accessed directly with bw get item
instead of searching through all items\. This doubles the lookup speed \([https\://github\.com/ansible\-collections/community\.general/pull/7468](https\://github\.com/ansible\-collections/community\.general/pull/7468)\)\.
-* btrfs\_subvolume \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* cmd\_runner module\_utils \- add validation for minimum and maximum length in the value passed to cmd\_runner\_fmt\.as\_list\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8288](https\://github\.com/ansible\-collections/community\.general/pull/8288)\)\.
-* consul\_auth\_method\, consul\_binding\_rule\, consul\_policy\, consul\_role\, consul\_session\, consul\_token \- added action group community\.general\.consul
\([https\://github\.com/ansible\-collections/community\.general/pull/7897](https\://github\.com/ansible\-collections/community\.general/pull/7897)\)\.
-* consul\_policy \- added support for diff and check mode \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* consul\_policy\, consul\_role\, consul\_session \- removed dependency on requests
and factored out common parts \([https\://github\.com/ansible\-collections/community\.general/pull/7826](https\://github\.com/ansible\-collections/community\.general/pull/7826)\, [https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* consul\_role \- node\_identities
now expects a node\_name
option to match the Consul API\, the old name
is still supported as alias \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* consul\_role \- service\_identities
now expects a service\_name
option to match the Consul API\, the old name
is still supported as alias \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* consul\_role \- added support for diff mode \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* consul\_role \- added support for templated policies \([https\://github\.com/ansible\-collections/community\.general/pull/7878](https\://github\.com/ansible\-collections/community\.general/pull/7878)\)\.
-* elastic callback plugin \- close elastic client to not leak resources \([https\://github\.com/ansible\-collections/community\.general/pull/7517](https\://github\.com/ansible\-collections/community\.general/pull/7517)\)\.
-* filesystem \- add bcachefs support \([https\://github\.com/ansible\-collections/community\.general/pull/8126](https\://github\.com/ansible\-collections/community\.general/pull/8126)\)\.
-* gandi\_livedns \- adds support for personal access tokens \([https\://github\.com/ansible\-collections/community\.general/issues/7639](https\://github\.com/ansible\-collections/community\.general/issues/7639)\, [https\://github\.com/ansible\-collections/community\.general/pull/8337](https\://github\.com/ansible\-collections/community\.general/pull/8337)\)\.
-* gconftool2 \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* git\_config \- allow multiple git configs for the same name with the new add\_mode
option \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\.
-* git\_config \- the after
and before
fields in the diff
of the return value can be a list instead of a string in case more configs with the same key are affected \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\.
-* git\_config \- when a value is unset\, all configs with the same key are unset \([https\://github\.com/ansible\-collections/community\.general/pull/7260](https\://github\.com/ansible\-collections/community\.general/pull/7260)\)\.
-* gitlab modules \- add ca\_path
option \([https\://github\.com/ansible\-collections/community\.general/pull/7472](https\://github\.com/ansible\-collections/community\.general/pull/7472)\)\.
-* gitlab modules \- remove duplicate gitlab
package check \([https\://github\.com/ansible\-collections/community\.general/pull/7486](https\://github\.com/ansible\-collections/community\.general/pull/7486)\)\.
-* gitlab\_deploy\_key\, gitlab\_group\_members\, gitlab\_group\_variable\, gitlab\_hook\, gitlab\_instance\_variable\, gitlab\_project\_badge\, gitlab\_project\_variable\, gitlab\_user \- improve API pagination and compatibility with different versions of python\-gitlab
\([https\://github\.com/ansible\-collections/community\.general/pull/7790](https\://github\.com/ansible\-collections/community\.general/pull/7790)\)\.
-* gitlab\_hook \- adds releases\_events
parameter for supporting Releases events triggers on GitLab hooks \([https\://github\.com/ansible\-collections/community\.general/pull/7956](https\://github\.com/ansible\-collections/community\.general/pull/7956)\)\.
-* gitlab\_runner \- add support for new runner creation workflow \([https\://github\.com/ansible\-collections/community\.general/pull/7199](https\://github\.com/ansible\-collections/community\.general/pull/7199)\)\.
-* homebrew \- adds force\_formula
parameter to disambiguate a formula from a cask of the same name \([https\://github\.com/ansible\-collections/community\.general/issues/8274](https\://github\.com/ansible\-collections/community\.general/issues/8274)\)\.
-* homebrew\, homebrew\_cask \- refactor common argument validation logic into a dedicated homebrew
module utils \([https\://github\.com/ansible\-collections/community\.general/issues/8323](https\://github\.com/ansible\-collections/community\.general/issues/8323)\, [https\://github\.com/ansible\-collections/community\.general/pull/8324](https\://github\.com/ansible\-collections/community\.general/pull/8324)\)\.
-* icinga2 inventory plugin \- add Jinja2 templating support to url
\, user
\, and password
parameters \([https\://github\.com/ansible\-collections/community\.general/issues/7074](https\://github\.com/ansible\-collections/community\.general/issues/7074)\, [https\://github\.com/ansible\-collections/community\.general/pull/7996](https\://github\.com/ansible\-collections/community\.general/pull/7996)\)\.
-* icinga2 inventory plugin \- adds new parameter group\_by\_hostgroups
in order to make grouping by Icinga2 hostgroups optional \([https\://github\.com/ansible\-collections/community\.general/pull/7998](https\://github\.com/ansible\-collections/community\.general/pull/7998)\)\.
-* ini\_file \- add an optional parameter section\_has\_values
\. If the target ini file contains more than one section
\, use section\_has\_values
to specify which one should be updated \([https\://github\.com/ansible\-collections/community\.general/pull/7505](https\://github\.com/ansible\-collections/community\.general/pull/7505)\)\.
-* ini\_file \- support optional spaces between section names and their surrounding brackets \([https\://github\.com/ansible\-collections/community\.general/pull/8075](https\://github\.com/ansible\-collections/community\.general/pull/8075)\)\.
-* installp \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* ipa\_config \- adds passkey
choice to ipauserauthtype
parameter\'s choices \([https\://github\.com/ansible\-collections/community\.general/pull/7588](https\://github\.com/ansible\-collections/community\.general/pull/7588)\)\.
-* ipa\_dnsrecord \- adds ability to manage NS record types \([https\://github\.com/ansible\-collections/community\.general/pull/7737](https\://github\.com/ansible\-collections/community\.general/pull/7737)\)\.
-* ipa\_pwpolicy \- refactor module and exchange a sequence of if
statements with a for
loop \([https\://github\.com/ansible\-collections/community\.general/pull/7723](https\://github\.com/ansible\-collections/community\.general/pull/7723)\)\.
-* ipa\_pwpolicy \- update module to support maxrepeat
\, maxsequence
\, dictcheck
\, usercheck
\, gracelimit
parameters in FreeIPA password policies \([https\://github\.com/ansible\-collections/community\.general/pull/7723](https\://github\.com/ansible\-collections/community\.general/pull/7723)\)\.
-* ipa\_sudorule \- adds options to include denied commands or command groups \([https\://github\.com/ansible\-collections/community\.general/pull/7415](https\://github\.com/ansible\-collections/community\.general/pull/7415)\)\.
-* ipa\_user \- adds idp
and passkey
choice to ipauserauthtype
parameter\'s choices \([https\://github\.com/ansible\-collections/community\.general/pull/7589](https\://github\.com/ansible\-collections/community\.general/pull/7589)\)\.
-* irc \- add validate\_certs
option\, and rename use\_ssl
to use\_tls
\, while keeping use\_ssl
as an alias\. The default value for validate\_certs
is false
for backwards compatibility\. We recommend to every user of this module to explicitly set use\_tls\=true
and validate\_certs\=true whenever possible\, especially when communicating to IRC servers over the internet \([https\://github\.com/ansible\-collections/community\.general/pull/7550](https\://github\.com/ansible\-collections/community\.general/pull/7550)\)\.
-* java\_cert \- add cert\_content
argument \([https\://github\.com/ansible\-collections/community\.general/pull/8153](https\://github\.com/ansible\-collections/community\.general/pull/8153)\)\.
-* java\_cert \- enable owner
\, group
\, mode
\, and other generic file arguments \([https\://github\.com/ansible\-collections/community\.general/pull/8116](https\://github\.com/ansible\-collections/community\.general/pull/8116)\)\.
-* kernel\_blacklist \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* keycloak module utils \- expose error message from Keycloak server for HTTP errors in some specific situations \([https\://github\.com/ansible\-collections/community\.general/pull/7645](https\://github\.com/ansible\-collections/community\.general/pull/7645)\)\.
-* keycloak\_client\, keycloak\_clientscope\, keycloak\_clienttemplate \- added docker\-v2
protocol support\, enhancing alignment with Keycloak\'s protocol options \([https\://github\.com/ansible\-collections/community\.general/issues/8215](https\://github\.com/ansible\-collections/community\.general/issues/8215)\, [https\://github\.com/ansible\-collections/community\.general/pull/8216](https\://github\.com/ansible\-collections/community\.general/pull/8216)\)\.
-* keycloak\_realm\_key \- the config\.algorithm
option now supports 8 additional key algorithms \([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\.
-* keycloak\_realm\_key \- the config\.certificate
option value is no longer defined with no\_log\=True
\([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\.
-* keycloak\_realm\_key \- the provider\_id
option now supports RSA encryption key usage \(value rsa\-enc
\) \([https\://github\.com/ansible\-collections/community\.general/pull/7698](https\://github\.com/ansible\-collections/community\.general/pull/7698)\)\.
-* keycloak\_user\_federation \- add option for krbPrincipalAttribute
\([https\://github\.com/ansible\-collections/community\.general/pull/7538](https\://github\.com/ansible\-collections/community\.general/pull/7538)\)\.
-* keycloak\_user\_federation \- allow custom user storage providers to be set through provider\_id
\([https\://github\.com/ansible\-collections/community\.general/pull/7789](https\://github\.com/ansible\-collections/community\.general/pull/7789)\)\.
-* ldap\_attrs \- module now supports diff mode\, showing which attributes are changed within an operation \([https\://github\.com/ansible\-collections/community\.general/pull/8073](https\://github\.com/ansible\-collections/community\.general/pull/8073)\)\.
-* lvg \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* lvol \- change pvs
argument type to list of strings \([https\://github\.com/ansible\-collections/community\.general/pull/7676](https\://github\.com/ansible\-collections/community\.general/pull/7676)\, [https\://github\.com/ansible\-collections/community\.general/issues/7504](https\://github\.com/ansible\-collections/community\.general/issues/7504)\)\.
-* lvol \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* lxd connection plugin \- tighten the detection logic for lxd Instance not found
errors\, to avoid false detection on unrelated errors such as /usr/bin/python3\: not found
\([https\://github\.com/ansible\-collections/community\.general/pull/7521](https\://github\.com/ansible\-collections/community\.general/pull/7521)\)\.
-* lxd\_container \- uses /1\.0/instances
API endpoint\, if available\. Falls back to /1\.0/containers
or /1\.0/virtual\-machines
\. Fixes issue when using Incus or LXD 5\.19 due to migrating to /1\.0/instances
endpoint \([https\://github\.com/ansible\-collections/community\.general/pull/7980](https\://github\.com/ansible\-collections/community\.general/pull/7980)\)\.
-* macports \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* mail \- add Message\-ID
header\; which is required by some mail servers \([https\://github\.com/ansible\-collections/community\.general/pull/7740](https\://github\.com/ansible\-collections/community\.general/pull/7740)\)\.
-* mail module\, mail callback plugin \- allow to configure the domain name of the Message\-ID header with a new message\_id\_domain
option \([https\://github\.com/ansible\-collections/community\.general/pull/7765](https\://github\.com/ansible\-collections/community\.general/pull/7765)\)\.
-* mssql\_script \- adds transactional \(rollback/commit\) support via optional boolean param transaction
\([https\://github\.com/ansible\-collections/community\.general/pull/7976](https\://github\.com/ansible\-collections/community\.general/pull/7976)\)\.
-* netcup\_dns \- adds support for record types OPENPGPKEY
\, SMIMEA
\, and SSHFP
\([https\://github\.com/ansible\-collections/community\.general/pull/7489](https\://github\.com/ansible\-collections/community\.general/pull/7489)\)\.
-* nmcli \- add support for new connection type loopback
\([https\://github\.com/ansible\-collections/community\.general/issues/6572](https\://github\.com/ansible\-collections/community\.general/issues/6572)\)\.
-* nmcli \- adds OpenvSwitch support with new type
values ovs\-port
\, ovs\-interface
\, and ovs\-bridge
\, and new slave\_type
value ovs\-port
\([https\://github\.com/ansible\-collections/community\.general/pull/8154](https\://github\.com/ansible\-collections/community\.general/pull/8154)\)\.
-* nmcli \- allow for infiniband
slaves of bond
interface types \([https\://github\.com/ansible\-collections/community\.general/pull/7569](https\://github\.com/ansible\-collections/community\.general/pull/7569)\)\.
-* nmcli \- allow for the setting of MTU
for infiniband
and bond
interface types \([https\://github\.com/ansible\-collections/community\.general/pull/7499](https\://github\.com/ansible\-collections/community\.general/pull/7499)\)\.
-* nmcli \- allow setting MTU
for bond\-slave
interface types \([https\://github\.com/ansible\-collections/community\.general/pull/8118](https\://github\.com/ansible\-collections/community\.general/pull/8118)\)\.
-* onepassword lookup plugin \- support 1Password Connect with the opv2 client by setting the connect\_host and connect\_token parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7116](https\://github\.com/ansible\-collections/community\.general/pull/7116)\)\.
-* onepassword\_raw lookup plugin \- support 1Password Connect with the opv2 client by setting the connect\_host and connect\_token parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7116](https\://github\.com/ansible\-collections/community\.general/pull/7116)\)\.
-* opentelemetry \- add support for HTTP trace\_exporter and configures the behavior via OTEL\_EXPORTER\_OTLP\_TRACES\_PROTOCOL
\([https\://github\.com/ansible\-collections/community\.general/issues/7888](https\://github\.com/ansible\-collections/community\.general/issues/7888)\, [https\://github\.com/ansible\-collections/community\.general/pull/8321](https\://github\.com/ansible\-collections/community\.general/pull/8321)\)\.
-* opentelemetry \- add support for exporting spans in a file via ANSIBLE\_OPENTELEMETRY\_STORE\_SPANS\_IN\_FILE
\([https\://github\.com/ansible\-collections/community\.general/issues/7888](https\://github\.com/ansible\-collections/community\.general/issues/7888)\, [https\://github\.com/ansible\-collections/community\.general/pull/8363](https\://github\.com/ansible\-collections/community\.general/pull/8363)\)\.
-* opkg \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* osx\_defaults \- add option check\_types
to enable changing the type of existing defaults on the fly \([https\://github\.com/ansible\-collections/community\.general/pull/8173](https\://github\.com/ansible\-collections/community\.general/pull/8173)\)\.
-* parted \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* passwordstore \- adds timestamp
and preserve
parameters to modify the stored password format \([https\://github\.com/ansible\-collections/community\.general/pull/7426](https\://github\.com/ansible\-collections/community\.general/pull/7426)\)\.
-* passwordstore lookup \- add missing\_subkey
parameter defining the behavior of the lookup when a passwordstore subkey is missing \([https\://github\.com/ansible\-collections/community\.general/pull/8166](https\://github\.com/ansible\-collections/community\.general/pull/8166)\)\.
-* pipx \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* pkg5 \- add support for non\-silent execution \([https\://github\.com/ansible\-collections/community\.general/issues/8379](https\://github\.com/ansible\-collections/community\.general/issues/8379)\, [https\://github\.com/ansible\-collections/community\.general/pull/8382](https\://github\.com/ansible\-collections/community\.general/pull/8382)\)\.
-* pkgin \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* portage \- adds the possibility to explicitly tell portage to write packages to world file \([https\://github\.com/ansible\-collections/community\.general/issues/6226](https\://github\.com/ansible\-collections/community\.general/issues/6226)\, [https\://github\.com/ansible\-collections/community\.general/pull/8236](https\://github\.com/ansible\-collections/community\.general/pull/8236)\)\.
-* portinstall \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* proxmox \- adds startup
parameters to configure startup order\, startup delay and shutdown delay \([https\://github\.com/ansible\-collections/community\.general/pull/8038](https\://github\.com/ansible\-collections/community\.general/pull/8038)\)\.
-* proxmox \- adds template
value to the state
parameter\, allowing conversion of container to a template \([https\://github\.com/ansible\-collections/community\.general/pull/7143](https\://github\.com/ansible\-collections/community\.general/pull/7143)\)\.
-* proxmox \- adds update
parameter\, allowing update of an already existing containers configuration \([https\://github\.com/ansible\-collections/community\.general/pull/7540](https\://github\.com/ansible\-collections/community\.general/pull/7540)\)\.
-* proxmox inventory plugin \- adds an option to exclude nodes from the dynamic inventory generation\. The new setting is optional\, not using this option will behave as usual \([https\://github\.com/ansible\-collections/community\.general/issues/6714](https\://github\.com/ansible\-collections/community\.general/issues/6714)\, [https\://github\.com/ansible\-collections/community\.general/pull/7461](https\://github\.com/ansible\-collections/community\.general/pull/7461)\)\.
-* proxmox\* modules \- there is now a community\.general\.proxmox
module defaults group that can be used to set default options for all Proxmox modules \([https\://github\.com/ansible\-collections/community\.general/pull/8334](https\://github\.com/ansible\-collections/community\.general/pull/8334)\)\.
-* proxmox\_disk \- add ability to manipulate CD\-ROM drive \([https\://github\.com/ansible\-collections/community\.general/pull/7495](https\://github\.com/ansible\-collections/community\.general/pull/7495)\)\.
-* proxmox\_kvm \- add parameter update\_unsafe
to avoid limitations when updating dangerous values \([https\://github\.com/ansible\-collections/community\.general/pull/7843](https\://github\.com/ansible\-collections/community\.general/pull/7843)\)\.
-* proxmox\_kvm \- adds template
value to the state
parameter\, allowing conversion of a VM to a template \([https\://github\.com/ansible\-collections/community\.general/pull/7143](https\://github\.com/ansible\-collections/community\.general/pull/7143)\)\.
-* proxmox\_kvm \- adds \`\`usb\`\` parameter for setting USB devices on proxmox KVM VMs \([https\://github\.com/ansible\-collections/community\.general/pull/8199](https\://github\.com/ansible\-collections/community\.general/pull/8199)\)\.
-* proxmox\_kvm \- support the hookscript
parameter \([https\://github\.com/ansible\-collections/community\.general/issues/7600](https\://github\.com/ansible\-collections/community\.general/issues/7600)\)\.
-* proxmox\_ostype \- it is now possible to specify the ostype
when creating an LXC container \([https\://github\.com/ansible\-collections/community\.general/pull/7462](https\://github\.com/ansible\-collections/community\.general/pull/7462)\)\.
-* proxmox\_vm\_info \- add ability to retrieve configuration info \([https\://github\.com/ansible\-collections/community\.general/pull/7485](https\://github\.com/ansible\-collections/community\.general/pull/7485)\)\.
-* puppet \- new feature to set \-\-waitforlock
option \([https\://github\.com/ansible\-collections/community\.general/pull/8282](https\://github\.com/ansible\-collections/community\.general/pull/8282)\)\.
-* redfish\_command \- add command ResetToDefaults
to reset manager to default state \([https\://github\.com/ansible\-collections/community\.general/issues/8163](https\://github\.com/ansible\-collections/community\.general/issues/8163)\)\.
-* redfish\_config \- add command SetServiceIdentification
to set service identification \([https\://github\.com/ansible\-collections/community\.general/issues/7916](https\://github\.com/ansible\-collections/community\.general/issues/7916)\)\.
-* redfish\_info \- add boolean return value MultipartHttpPush
to GetFirmwareUpdateCapabilities
\([https\://github\.com/ansible\-collections/community\.general/issues/8194](https\://github\.com/ansible\-collections/community\.general/issues/8194)\, [https\://github\.com/ansible\-collections/community\.general/pull/8195](https\://github\.com/ansible\-collections/community\.general/pull/8195)\)\.
-* redfish\_info \- add command GetServiceIdentification
to get service identification \([https\://github\.com/ansible\-collections/community\.general/issues/7882](https\://github\.com/ansible\-collections/community\.general/issues/7882)\)\.
-* redfish\_info \- adding the BootProgress
property when getting Systems
info \([https\://github\.com/ansible\-collections/community\.general/pull/7626](https\://github\.com/ansible\-collections/community\.general/pull/7626)\)\.
-* revbitspss lookup plugin \- removed a redundant unicode prefix\. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code \([https\://github\.com/ansible\-collections/community\.general/pull/8087](https\://github\.com/ansible\-collections/community\.general/pull/8087)\)\.
-* rundeck module utils \- allow to pass Content\-Type
to API requests \([https\://github\.com/ansible\-collections/community\.general/pull/7684](https\://github\.com/ansible\-collections/community\.general/pull/7684)\)\.
-* slackpkg \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* ssh\_config \- adds controlmaster
\, controlpath
and controlpersist
parameters \([https\://github\.com/ansible\-collections/community\.general/pull/7456](https\://github\.com/ansible\-collections/community\.general/pull/7456)\)\.
-* ssh\_config \- allow accept\-new
as valid value for strict\_host\_key\_checking
\([https\://github\.com/ansible\-collections/community\.general/pull/8257](https\://github\.com/ansible\-collections/community\.general/pull/8257)\)\.
-* ssh\_config \- new feature to set AddKeysToAgent
option to yes
or no
\([https\://github\.com/ansible\-collections/community\.general/pull/7703](https\://github\.com/ansible\-collections/community\.general/pull/7703)\)\.
-* ssh\_config \- new feature to set IdentitiesOnly
option to yes
or no
\([https\://github\.com/ansible\-collections/community\.general/pull/7704](https\://github\.com/ansible\-collections/community\.general/pull/7704)\)\.
-* sudoers \- add support for the NOEXEC
tag in sudoers rules \([https\://github\.com/ansible\-collections/community\.general/pull/7983](https\://github\.com/ansible\-collections/community\.general/pull/7983)\)\.
-* svr4pkg \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* swdepot \- refactor module to pass list of arguments to module\.run\_command\(\)
instead of relying on interpretation by a shell \([https\://github\.com/ansible\-collections/community\.general/pull/8264](https\://github\.com/ansible\-collections/community\.general/pull/8264)\)\.
-* terraform \- add support for diff\_mode
for terraform resource\_changes \([https\://github\.com/ansible\-collections/community\.general/pull/7896](https\://github\.com/ansible\-collections/community\.general/pull/7896)\)\.
-* terraform \- fix diff\_mode
in state absent
and when terraform resource\_changes
does not exist \([https\://github\.com/ansible\-collections/community\.general/pull/7963](https\://github\.com/ansible\-collections/community\.general/pull/7963)\)\.
-* xcc\_redfish\_command \- added support for raw POSTs \(command\=PostResource
in category\=Raw
\) without a specific action info \([https\://github\.com/ansible\-collections/community\.general/pull/7746](https\://github\.com/ansible\-collections/community\.general/pull/7746)\)\.
-* xfconf \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* xfconf\_info \- use ModuleHelper
with VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-
-
-### Breaking Changes / Porting Guide
-
-* cpanm \- the default of the mode
option changed from compatibility
to new
\([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* django\_manage \- the module now requires Django \>\= 4\.1 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* django\_manage \- the module will now fail if virtualenv
is specified but no virtual environment exists at that location \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* redfish\_command\, redfish\_config\, redfish\_info \- change the default for timeout
from 10 to 60 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-
-
-### Deprecated Features
-
-* MH DependencyCtxMgr module\_utils \- deprecate module\_utils\.mh\.mixin\.deps\.DependencyCtxMgr
in favour of module\_utils\.deps
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.AnsibleModule
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.DependencyCtxMgr
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.StateMixin
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarDict
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarMeta
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate plugins\.module\_utils\.module\_helper\.VarsMixin
\([https\://github\.com/ansible\-collections/community\.general/pull/8280](https\://github\.com/ansible\-collections/community\.general/pull/8280)\)\.
-* ModuleHelper module\_utils \- deprecate use of VarsMixin
in favor of using the VarDict
module\_utils \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* ModuleHelper vars module\_utils \- bump deprecation of VarMeta
\, VarDict
and VarsMixin
to version 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8226](https\://github\.com/ansible\-collections/community\.general/pull/8226)\)\.
-* apt\_rpm \- the behavior of state\=present
and state\=installed
is deprecated and will change in community\.general 11\.0\.0\. Right now the module will upgrade a package to the latest version if one of these two states is used\. You should explicitly use state\=latest
if you want this behavior\, and switch to state\=present\_not\_latest
if you do not want to upgrade the package if it is already installed\. In community\.general 11\.0\.0 the behavior of state\=present
and state\=installed
will change to that of state\=present\_not\_latest
\([https\://github\.com/ansible\-collections/community\.general/issues/8217](https\://github\.com/ansible\-collections/community\.general/issues/8217)\, [https\://github\.com/ansible\-collections/community\.general/pull/8285](https\://github\.com/ansible\-collections/community\.general/pull/8285)\)\.
-* consul\_acl \- the module has been deprecated and will be removed in community\.general 10\.0\.0\. consul\_token
and consul\_policy
can be used instead \([https\://github\.com/ansible\-collections/community\.general/pull/7901](https\://github\.com/ansible\-collections/community\.general/pull/7901)\)\.
-* django\_manage \- the ack\_venv\_creation\_deprecation
option has no more effect and will be removed from community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* gitlab modules \- the basic auth method on GitLab API have been deprecated and will be removed in community\.general 10\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8383](https\://github\.com/ansible\-collections/community\.general/pull/8383)\)\.
-* hipchat callback plugin \- the hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020\. The callback plugin is therefore deprecated and will be removed from community\.general 10\.0\.0 if nobody provides compelling reasons to still keep it \([https\://github\.com/ansible\-collections/community\.general/issues/8184](https\://github\.com/ansible\-collections/community\.general/issues/8184)\, [https\://github\.com/ansible\-collections/community\.general/pull/8189](https\://github\.com/ansible\-collections/community\.general/pull/8189)\)\.
-* irc \- the defaults false
for use\_tls
and validate\_certs
have been deprecated and will change to true
in community\.general 10\.0\.0 to improve security\. You can already improve security now by explicitly setting them to true
\. Specifying values now disables the deprecation warning \([https\://github\.com/ansible\-collections/community\.general/pull/7578](https\://github\.com/ansible\-collections/community\.general/pull/7578)\)\.
-
-
-### Removed Features \(previously deprecated\)
-
-* The deprecated redirects for internal module names have been removed\. These internal redirects were extra\-long FQCNs like community\.general\.packaging\.os\.apt\_rpm
that redirect to the short FQCN community\.general\.apt\_rpm
\. They were originally needed to implement flatmapping\; as various tooling started to recommend users to use the long names flatmapping was removed from the collection and redirects were added for users who already followed these incorrect recommendations \([https\://github\.com/ansible\-collections/community\.general/pull/7835](https\://github\.com/ansible\-collections/community\.general/pull/7835)\)\.
-* ansible\_galaxy\_install \- the ack\_ansible29
and ack\_min\_ansiblecore211
options have been removed\. They no longer had any effect \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* cloudflare\_dns \- remove support for SPF records\. These are no longer supported by CloudFlare \([https\://github\.com/ansible\-collections/community\.general/pull/7782](https\://github\.com/ansible\-collections/community\.general/pull/7782)\)\.
-* django\_manage \- support for the command
values cleanup
\, syncdb
\, and validate
were removed\. Use clearsessions
\, migrate
\, and check
instead\, respectively \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* flowdock \- this module relied on HTTPS APIs that do not exist anymore and was thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* mh\.mixins\.deps module utils \- the DependencyMixin
has been removed\. Use the deps
module utils instead \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* proxmox \- the proxmox\_default\_behavior
option has been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* rax\* modules\, rax module utils\, rax docs fragment \- the Rackspace modules relied on the deprecated package pyrax
and were thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* redhat module utils \- the classes Rhsm
\, RhsmPool
\, and RhsmPools
have been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* redhat\_subscription \- the alias autosubscribe
of the auto\_attach
option was removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* stackdriver \- this module relied on HTTPS APIs that do not exist anymore and was thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-* webfaction\_\* modules \- these modules relied on HTTPS APIs that do not exist anymore and were thus removed \([https\://github\.com/ansible\-collections/community\.general/pull/8198](https\://github\.com/ansible\-collections/community\.general/pull/8198)\)\.
-
-
-### Security Fixes
-
-* cobbler\, gitlab\_runners\, icinga2\, linode\, lxd\, nmap\, online\, opennebula\, proxmox\, scaleway\, stackpath\_compute\, virtualbox\, and xen\_orchestra inventory plugin \- make sure all data received from the remote servers is marked as unsafe\, so remote code execution by obtaining texts that can be evaluated as templates is not possible \([https\://www\.die\-welt\.net/2024/03/remote\-code\-execution\-in\-ansible\-dynamic\-inventory\-plugins/](https\://www\.die\-welt\.net/2024/03/remote\-code\-execution\-in\-ansible\-dynamic\-inventory\-plugins/)\, [https\://github\.com/ansible\-collections/community\.general/pull/8098](https\://github\.com/ansible\-collections/community\.general/pull/8098)\)\.
-* keycloak\_identity\_provider \- the client secret was not correctly sanitized by the module\. The return values proposed
\, existing
\, and end\_state
\, as well as the diff\, did contain the client secret unmasked \([https\://github\.com/ansible\-collections/community\.general/pull/8355](https\://github\.com/ansible\-collections/community\.general/pull/8355)\)\.
-
-
-### Bugfixes
-
-* aix\_filesystem \- fix \_validate\_vg
not passing VG name to lsvg\_cmd
\([https\://github\.com/ansible\-collections/community\.general/issues/8151](https\://github\.com/ansible\-collections/community\.general/issues/8151)\)\.
-* aix\_filesystem \- fix issue with empty list items in crfs logic and option order \([https\://github\.com/ansible\-collections/community\.general/pull/8052](https\://github\.com/ansible\-collections/community\.general/pull/8052)\)\.
-* apt\-rpm \- the module did not upgrade packages if a newer version exists\. Now the package will be reinstalled if the candidate is newer than the installed version \([https\://github\.com/ansible\-collections/community\.general/issues/7414](https\://github\.com/ansible\-collections/community\.general/issues/7414)\)\.
-* apt\_rpm \- when checking whether packages were installed after running apt\-get \-y install \
\, only the last package name was checked \([https\://github\.com/ansible\-collections/community\.general/pull/8263](https\://github\.com/ansible\-collections/community\.general/pull/8263)\)\.
-* bitwarden\_secrets\_manager lookup plugin \- implements retry with exponential backoff to avoid lookup errors when Bitwardn\'s API rate limiting is encountered \([https\://github\.com/ansible\-collections/community\.general/issues/8230](https\://github\.com/ansible\-collections/community\.general/issues/8230)\, [https\://github\.com/ansible\-collections/community\.general/pull/8238](https\://github\.com/ansible\-collections/community\.general/pull/8238)\)\.
-* cargo \- fix idempotency issues when using a custom installation path for packages \(using the \-\-path
parameter\)\. The initial installation runs fine\, but subsequent runs use the get\_installed\(\)
function which did not check the given installation location\, before running cargo install
\. This resulted in a false changed
state\. Also the removal of packeges using state\: absent
failed\, as the installation check did not use the given parameter \([https\://github\.com/ansible\-collections/community\.general/pull/7970](https\://github\.com/ansible\-collections/community\.general/pull/7970)\)\.
-* cloudflare\_dns \- fix Cloudflare lookup of SHFP records \([https\://github\.com/ansible\-collections/community\.general/issues/7652](https\://github\.com/ansible\-collections/community\.general/issues/7652)\)\.
-* consul\_token \- fix token creation without accessor\_id
\([https\://github\.com/ansible\-collections/community\.general/pull/8091](https\://github\.com/ansible\-collections/community\.general/pull/8091)\)\.
-* from\_ini filter plugin \- disabling interpolation of ConfigParser
to allow converting values with a \%
sign \([https\://github\.com/ansible\-collections/community\.general/issues/8183](https\://github\.com/ansible\-collections/community\.general/issues/8183)\, [https\://github\.com/ansible\-collections/community\.general/pull/8185](https\://github\.com/ansible\-collections/community\.general/pull/8185)\)\.
-* gitlab\_group\_members \- fix gitlab constants call in gitlab\_group\_members
module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\.
-* gitlab\_issue \- fix behavior to search GitLab issue\, using search
keyword instead of title
\([https\://github\.com/ansible\-collections/community\.general/issues/7846](https\://github\.com/ansible\-collections/community\.general/issues/7846)\)\.
-* gitlab\_issue\, gitlab\_label\, gitlab\_milestone \- avoid crash during version comparison when the python\-gitlab Python module is not installed \([https\://github\.com/ansible\-collections/community\.general/pull/8158](https\://github\.com/ansible\-collections/community\.general/pull/8158)\)\.
-* gitlab\_project\_members \- fix gitlab constants call in gitlab\_project\_members
module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\.
-* gitlab\_protected\_branches \- fix gitlab constants call in gitlab\_protected\_branches
module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\.
-* gitlab\_runner \- fix pagination when checking for existing runners \([https\://github\.com/ansible\-collections/community\.general/pull/7790](https\://github\.com/ansible\-collections/community\.general/pull/7790)\)\.
-* gitlab\_user \- fix gitlab constants call in gitlab\_user
module \([https\://github\.com/ansible\-collections/community\.general/issues/7467](https\://github\.com/ansible\-collections/community\.general/issues/7467)\)\.
-* haproxy \- fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable \([https\://github\.com/ansible\-collections/community\.general/issues/8092](https\://github\.com/ansible\-collections/community\.general/issues/8092)\)\.
-* homebrew \- detect already installed formulae and casks using JSON output from brew info
\([https\://github\.com/ansible\-collections/community\.general/issues/864](https\://github\.com/ansible\-collections/community\.general/issues/864)\)\.
-* homebrew \- error returned from brew command was ignored and tried to parse empty JSON\. Fix now checks for an error and raises it to give accurate error message to users \([https\://github\.com/ansible\-collections/community\.general/issues/8047](https\://github\.com/ansible\-collections/community\.general/issues/8047)\)\.
-* incus connection plugin \- treats inventory\_hostname
as a variable instead of a literal in remote connections \([https\://github\.com/ansible\-collections/community\.general/issues/7874](https\://github\.com/ansible\-collections/community\.general/issues/7874)\)\.
-* interface\_files \- also consider address\_family
when changing option\=method
\([https\://github\.com/ansible\-collections/community\.general/issues/7610](https\://github\.com/ansible\-collections/community\.general/issues/7610)\, [https\://github\.com/ansible\-collections/community\.general/pull/7612](https\://github\.com/ansible\-collections/community\.general/pull/7612)\)\.
-* inventory plugins \- add unsafe wrapper to avoid marking strings that do not contain \{
or \}
as unsafe\, to work around a bug in AWX \(\([https\://github\.com/ansible\-collections/community\.general/issues/8212](https\://github\.com/ansible\-collections/community\.general/issues/8212)\, [https\://github\.com/ansible\-collections/community\.general/pull/8225](https\://github\.com/ansible\-collections/community\.general/pull/8225)\)\.
-* ipa \- fix get version regex in IPA module\_utils \([https\://github\.com/ansible\-collections/community\.general/pull/8175](https\://github\.com/ansible\-collections/community\.general/pull/8175)\)\.
-* ipa\_hbacrule \- the module uses a string for ipaenabledflag
for new FreeIPA versions while the returned value is a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7880](https\://github\.com/ansible\-collections/community\.general/pull/7880)\)\.
-* ipa\_otptoken \- the module expect ipatokendisabled
as string but the ipatokendisabled
value is returned as a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7795](https\://github\.com/ansible\-collections/community\.general/pull/7795)\)\.
-* ipa\_sudorule \- the module uses a string for ipaenabledflag
for new FreeIPA versions while the returned value is a boolean \([https\://github\.com/ansible\-collections/community\.general/pull/7880](https\://github\.com/ansible\-collections/community\.general/pull/7880)\)\.
-* iptables\_state \- fix idempotency issues when restoring incomplete iptables dumps \([https\://github\.com/ansible\-collections/community\.general/issues/8029](https\://github\.com/ansible\-collections/community\.general/issues/8029)\)\.
-* irc \- replace ssl\.wrap\_socket
that was removed from Python 3\.12 with code for creating a proper SSL context \([https\://github\.com/ansible\-collections/community\.general/pull/7542](https\://github\.com/ansible\-collections/community\.general/pull/7542)\)\.
-* keycloak\_\* \- fix Keycloak API client to quote /
properly \([https\://github\.com/ansible\-collections/community\.general/pull/7641](https\://github\.com/ansible\-collections/community\.general/pull/7641)\)\.
-* keycloak\_authz\_permission \- resource payload variable for scope\-based permission was constructed as a string\, when it needs to be a list\, even for a single item \([https\://github\.com/ansible\-collections/community\.general/issues/7151](https\://github\.com/ansible\-collections/community\.general/issues/7151)\)\.
-* keycloak\_client \- add sorted defaultClientScopes
and optionalClientScopes
to normalizations \([https\://github\.com/ansible\-collections/community\.general/pull/8223](https\://github\.com/ansible\-collections/community\.general/pull/8223)\)\.
-* keycloak\_client \- fixes issue when metadata is provided in desired state when task is in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/1226](https\://github\.com/ansible\-collections/community\.general/issues/1226)\, [https\://github\.com/ansible\-collections/community\.general/pull/7881](https\://github\.com/ansible\-collections/community\.general/pull/7881)\)\.
-* keycloak\_identity\_provider \- mappers
processing was not idempotent if the mappers configuration list had not been sorted by name \(in ascending order\)\. Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state \([https\://github\.com/ansible\-collections/community\.general/pull/7418](https\://github\.com/ansible\-collections/community\.general/pull/7418)\)\.
-* keycloak\_identity\_provider \- it was not possible to reconfigure \(add\, remove\) mappers
once they were created initially\. Removal was ignored\, adding new ones resulted in dropping the pre\-existing unmodified mappers\. Fix resolves the issue by supplying correct input to the internal update call \([https\://github\.com/ansible\-collections/community\.general/pull/7418](https\://github\.com/ansible\-collections/community\.general/pull/7418)\)\.
-* keycloak\_realm \- add normalizations for enabledEventTypes
and supportedLocales
\([https\://github\.com/ansible\-collections/community\.general/pull/8224](https\://github\.com/ansible\-collections/community\.general/pull/8224)\)\.
-* keycloak\_user \- when force
is set\, but user does not exist\, do not try to delete it \([https\://github\.com/ansible\-collections/community\.general/pull/7696](https\://github\.com/ansible\-collections/community\.general/pull/7696)\)\.
-* keycloak\_user\_federation \- fix diff of empty krbPrincipalAttribute
\([https\://github\.com/ansible\-collections/community\.general/pull/8320](https\://github\.com/ansible\-collections/community\.general/pull/8320)\)\.
-* ldap \- previously the order number \(if present\) was expected to follow an equals sign in the DN\. This makes it so the order number string is identified correctly anywhere within the DN \([https\://github\.com/ansible\-collections/community\.general/issues/7646](https\://github\.com/ansible\-collections/community\.general/issues/7646)\)\.
-* linode inventory plugin \- add descriptive error message for linode inventory plugin \([https\://github\.com/ansible\-collections/community\.general/pull/8133](https\://github\.com/ansible\-collections/community\.general/pull/8133)\)\.
-* log\_entries callback plugin \- replace ssl\.wrap\_socket
that was removed from Python 3\.12 with code for creating a proper SSL context \([https\://github\.com/ansible\-collections/community\.general/pull/7542](https\://github\.com/ansible\-collections/community\.general/pull/7542)\)\.
-* lvol \- test for output messages in both stdout
and stderr
\([https\://github\.com/ansible\-collections/community\.general/pull/7601](https\://github\.com/ansible\-collections/community\.general/pull/7601)\, [https\://github\.com/ansible\-collections/community\.general/issues/7182](https\://github\.com/ansible\-collections/community\.general/issues/7182)\)\.
-* merge\_variables lookup plugin \- fixing cross host merge\: providing access to foreign hosts variables to the perspective of the host that is performing the merge \([https\://github\.com/ansible\-collections/community\.general/pull/8303](https\://github\.com/ansible\-collections/community\.general/pull/8303)\)\.
-* modprobe \- listing modules files or modprobe files could trigger a FileNotFoundError if /etc/modprobe\.d
or /etc/modules\-load\.d
did not exist\. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module \([https\://github\.com/ansible\-collections/community\.general/issues/7717](https\://github\.com/ansible\-collections/community\.general/issues/7717)\)\.
-* mssql\_script \- make the module work with Python 2 \([https\://github\.com/ansible\-collections/community\.general/issues/7818](https\://github\.com/ansible\-collections/community\.general/issues/7818)\, [https\://github\.com/ansible\-collections/community\.general/pull/7821](https\://github\.com/ansible\-collections/community\.general/pull/7821)\)\.
-* nmcli \- fix connection\.slave\-type
wired to bond
and not with parameter slave\_type
in case of connection type wifi
\([https\://github\.com/ansible\-collections/community\.general/issues/7389](https\://github\.com/ansible\-collections/community\.general/issues/7389)\)\.
-* ocapi\_utils\, oci\_utils\, redfish\_utils module utils \- replace type\(\)
calls with isinstance\(\)
calls \([https\://github\.com/ansible\-collections/community\.general/pull/7501](https\://github\.com/ansible\-collections/community\.general/pull/7501)\)\.
-* onepassword lookup plugin \- failed for fields that were in sections and had uppercase letters in the label/ID\. Field lookups are now case insensitive in all cases \([https\://github\.com/ansible\-collections/community\.general/pull/7919](https\://github\.com/ansible\-collections/community\.general/pull/7919)\)\.
-* onepassword lookup plugin \- field and section titles are now case insensitive when using op CLI version two or later\. This matches the behavior of version one \([https\://github\.com/ansible\-collections/community\.general/pull/7564](https\://github\.com/ansible\-collections/community\.general/pull/7564)\)\.
-* opentelemetry callback plugin \- close spans always \([https\://github\.com/ansible\-collections/community\.general/pull/8367](https\://github\.com/ansible\-collections/community\.general/pull/8367)\)\.
-* opentelemetry callback plugin \- honour the disable\_logs
option to avoid storing task results since they are not used regardless \([https\://github\.com/ansible\-collections/community\.general/pull/8373](https\://github\.com/ansible\-collections/community\.general/pull/8373)\)\.
-* pacemaker\_cluster \- actually implement check mode\, which the module claims to support\. This means that until now the module also did changes in check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8081](https\://github\.com/ansible\-collections/community\.general/pull/8081)\)\.
-* pam\_limits \- when the file does not exist\, do not create it in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/8050](https\://github\.com/ansible\-collections/community\.general/issues/8050)\, [https\://github\.com/ansible\-collections/community\.general/pull/8057](https\://github\.com/ansible\-collections/community\.general/pull/8057)\)\.
-* pipx module utils \- change the CLI argument formatter for the pip\_args
parameter \([https\://github\.com/ansible\-collections/community\.general/issues/7497](https\://github\.com/ansible\-collections/community\.general/issues/7497)\, [https\://github\.com/ansible\-collections/community\.general/pull/7506](https\://github\.com/ansible\-collections/community\.general/pull/7506)\)\.
-* pkgin \- pkgin \(pkgsrc package manager used by SmartOS\) raises erratic exceptions and spurious changed\=true
\([https\://github\.com/ansible\-collections/community\.general/pull/7971](https\://github\.com/ansible\-collections/community\.general/pull/7971)\)\.
-* proxmox \- fix updating a container config if the setting does not already exist \([https\://github\.com/ansible\-collections/community\.general/pull/7872](https\://github\.com/ansible\-collections/community\.general/pull/7872)\)\.
-* proxmox\_kvm \- fixed status check getting from node\-specific API endpoint \([https\://github\.com/ansible\-collections/community\.general/issues/7817](https\://github\.com/ansible\-collections/community\.general/issues/7817)\)\.
-* proxmox\_kvm \- running state\=template
will first check whether VM is already a template \([https\://github\.com/ansible\-collections/community\.general/pull/7792](https\://github\.com/ansible\-collections/community\.general/pull/7792)\)\.
-* proxmox\_pool\_member \- absent state for type VM did not delete VMs from the pools \([https\://github\.com/ansible\-collections/community\.general/pull/7464](https\://github\.com/ansible\-collections/community\.general/pull/7464)\)\.
-* puppet \- add option environment\_lang
to set the environment language encoding\. Defaults to lang C
\. It is recommended to set it to C\.UTF\-8
or en\_US\.UTF\-8
depending on what is available on your system\. \([https\://github\.com/ansible\-collections/community\.general/issues/8000](https\://github\.com/ansible\-collections/community\.general/issues/8000)\)
-* redfish\_command \- fix usage of message parsing in SimpleUpdate
and MultipartHTTPPushUpdate
commands to treat the lack of a MessageId
as no message \([https\://github\.com/ansible\-collections/community\.general/issues/7465](https\://github\.com/ansible\-collections/community\.general/issues/7465)\, [https\://github\.com/ansible\-collections/community\.general/pull/7471](https\://github\.com/ansible\-collections/community\.general/pull/7471)\)\.
-* redfish\_info \- allow for a GET operation invoked by GetUpdateStatus
to allow for an empty response body for cases where a service returns 204 No Content \([https\://github\.com/ansible\-collections/community\.general/issues/8003](https\://github\.com/ansible\-collections/community\.general/issues/8003)\)\.
-* redfish\_info \- correct uncaught exception when attempting to retrieve Chassis
information \([https\://github\.com/ansible\-collections/community\.general/pull/7952](https\://github\.com/ansible\-collections/community\.general/pull/7952)\)\.
-* redhat\_subscription \- use the D\-Bus registration on RHEL 7 only on 7\.4 and
- greater\; older versions of RHEL 7 do not have it
- \([https\://github\.com/ansible\-collections/community\.general/issues/7622](https\://github\.com/ansible\-collections/community\.general/issues/7622)\,
- [https\://github\.com/ansible\-collections/community\.general/pull/7624](https\://github\.com/ansible\-collections/community\.general/pull/7624)\)\.
-* riak \- support riak admin
sub\-command in newer Riak KV versions beside the legacy riak\-admin
main command \([https\://github\.com/ansible\-collections/community\.general/pull/8211](https\://github\.com/ansible\-collections/community\.general/pull/8211)\)\.
-* statusio\_maintenance \- fix error caused by incorrectly formed API data payload\. Was raising \"Failed to create maintenance HTTP Error 400 Bad Request\" caused by bad data type for date/time and deprecated dict keys \([https\://github\.com/ansible\-collections/community\.general/pull/7754](https\://github\.com/ansible\-collections/community\.general/pull/7754)\)\.
-* terraform \- fix multiline string handling in complex variables \([https\://github\.com/ansible\-collections/community\.general/pull/7535](https\://github\.com/ansible\-collections/community\.general/pull/7535)\)\.
-* to\_ini filter plugin \- disabling interpolation of ConfigParser
to allow converting values with a \%
sign \([https\://github\.com/ansible\-collections/community\.general/issues/8183](https\://github\.com/ansible\-collections/community\.general/issues/8183)\, [https\://github\.com/ansible\-collections/community\.general/pull/8185](https\://github\.com/ansible\-collections/community\.general/pull/8185)\)\.
-* xml \- make module work with lxml 5\.1\.1\, which removed some internals that the module was relying on \([https\://github\.com/ansible\-collections/community\.general/pull/8169](https\://github\.com/ansible\-collections/community\.general/pull/8169)\)\.
-
-
-### New Plugins
-
-
-#### Become
-
-* community\.general\.run0 \- Systemd\'s run0\.
-
-
-#### Callback
-
-* community\.general\.default\_without\_diff \- The default ansible callback without diff output\.
-* community\.general\.timestamp \- Adds simple timestamp for each header\.
-
-
-#### Connection
-
-* community\.general\.incus \- Run tasks in Incus instances via the Incus CLI\.
-
-
-#### Filter
-
-* community\.general\.from\_ini \- Converts INI text input into a dictionary\.
-* community\.general\.lists\_difference \- Difference of lists with a predictive order\.
-* community\.general\.lists\_intersect \- Intersection of lists with a predictive order\.
-* community\.general\.lists\_symmetric\_difference \- Symmetric Difference of lists with a predictive order\.
-* community\.general\.lists\_union \- Union of lists with a predictive order\.
-* community\.general\.to\_ini \- Converts a dictionary to the INI file format\.
-
-
-#### Lookup
-
-* community\.general\.github\_app\_access\_token \- Obtain short\-lived Github App Access tokens\.
-* community\.general\.onepassword\_doc \- Fetch documents stored in 1Password\.
-
-
-#### Test
-
-* community\.general\.fqdn\_valid \- Validates fully\-qualified domain names against RFC 1123\.
-
-
-### New Modules
-
-* community\.general\.consul\_acl\_bootstrap \- Bootstrap ACLs in Consul\.
-* community\.general\.consul\_auth\_method \- Manipulate Consul auth methods\.
-* community\.general\.consul\_binding\_rule \- Manipulate Consul binding rules\.
-* community\.general\.consul\_token \- Manipulate Consul tokens\.
-* community\.general\.django\_command \- Run Django admin commands\.
-* community\.general\.dnf\_config\_manager \- Enable or disable dnf repositories using config\-manager\.
-* community\.general\.git\_config\_info \- Read git configuration\.
-* community\.general\.gitlab\_group\_access\_token \- Manages GitLab group access tokens\.
-* community\.general\.gitlab\_issue \- Create\, update\, or delete GitLab issues\.
-* community\.general\.gitlab\_label \- Creates/updates/deletes GitLab Labels belonging to project or group\.
-* community\.general\.gitlab\_milestone \- Creates/updates/deletes GitLab Milestones belonging to project or group\.
-* community\.general\.gitlab\_project\_access\_token \- Manages GitLab project access tokens\.
-* community\.general\.keycloak\_client\_rolescope \- Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications\.
-* community\.general\.keycloak\_component\_info \- Retrive component info in Keycloak\.
-* community\.general\.keycloak\_realm\_rolemapping \- Allows administration of Keycloak realm role mappings into groups with the Keycloak API\.
-* community\.general\.nomad\_token \- Manage Nomad ACL tokens\.
-* community\.general\.proxmox\_node\_info \- Retrieve information about one or more Proxmox VE nodes\.
-* community\.general\.proxmox\_storage\_contents\_info \- List content from a Proxmox VE storage\.
-* community\.general\.usb\_facts \- Allows listing information about USB devices\.
+This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments
+under `changelogs/fragments`. On release branches, once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 0c334b5ae8..119e04e170 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,938 +1,6 @@
-===============================
-Community General Release Notes
-===============================
+Placeholder changelog
+=====================
-.. contents:: Topics
-
-This changelog describes changes after version 8.0.0.
-
-v9.5.9
-======
-
-Release Summary
----------------
-
-Bugfix release.
-
-Bugfixes
---------
-
-- yaml callback plugin - adjust to latest changes in ansible-core devel (https://github.com/ansible-collections/community.general/pull/10212).
-- yaml callback plugin - when using ansible-core 2.19.0b2 or newer, uses a new utility provided by ansible-core. This allows us to remove all hacks and vendored code that was part of the plugin for ansible-core versions with Data Tagging so far (https://github.com/ansible-collections/community.general/pull/10242).
-
-v9.5.8
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Bugfixes
---------
-
-- cobbler_system - fix bug with Cobbler >= 3.4.0 caused by giving more than 2 positional arguments to ``CobblerXMLRPCInterface.get_system_handle()`` (https://github.com/ansible-collections/community.general/issues/8506, https://github.com/ansible-collections/community.general/pull/10145).
-- kdeconfig - allow option values beginning with a dash (https://github.com/ansible-collections/community.general/issues/10127, https://github.com/ansible-collections/community.general/pull/10128).
-- keycloak_user_rolemapping - fix ``--diff`` mode (https://github.com/ansible-collections/community.general/issues/10067, https://github.com/ansible-collections/community.general/pull/10075).
-- pickle cache plugin - avoid extra JSON serialization with ansible-core >= 2.19 (https://github.com/ansible-collections/community.general/pull/10136).
-- rundeck_acl_policy - ensure that project ACLs are sent to the correct endpoint (https://github.com/ansible-collections/community.general/pull/10097).
-- sysrc - split the output of ``sysrc -e -a`` on the first ``=`` only (https://github.com/ansible-collections/community.general/issues/10120, https://github.com/ansible-collections/community.general/pull/10121).
-
-v9.5.7
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Minor Changes
--------------
-
-- apache2_module - added workaround for new PHP module name, from ``php7_module`` to ``php_module`` (https://github.com/ansible-collections/community.general/pull/9951).
-
-Bugfixes
---------
-
-- dependent look plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- diy callback plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- github_deploy_key - check that key really exists on 422 to avoid masking other errors (https://github.com/ansible-collections/community.general/issues/6718, https://github.com/ansible-collections/community.general/pull/10011).
-- hashids and unicode_normalize filter plugins - avoid deprecated ``AnsibleFilterTypeError`` on ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9992).
-- keycloak_authentication - fix authentification config duplication for Keycloak < 26.2.0 (https://github.com/ansible-collections/community.general/pull/9987).
-- keycloak_client - fix the idempotency regression by normalizing the Keycloak response for ``after_client`` (https://github.com/ansible-collections/community.general/issues/9905, https://github.com/ansible-collections/community.general/pull/9976).
-- proxmox inventory plugin - fix ``ansible_host`` staying empty for certain Proxmox nodes (https://github.com/ansible-collections/community.general/issues/5906, https://github.com/ansible-collections/community.general/pull/9952).
-- proxmox_disk - fail gracefully if ``storage`` is required but not provided by the user (https://github.com/ansible-collections/community.general/issues/9941, https://github.com/ansible-collections/community.general/pull/9963).
-- reveal_ansible_type filter plugin and ansible_type test plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- sysrc - no longer always reporting ``changed=true`` when ``state=absent``. This fixes the method ``exists()`` (https://github.com/ansible-collections/community.general/issues/10004, https://github.com/ansible-collections/community.general/pull/10005).
-- yaml callback plugin - use ansible-core internals to avoid breakage with Data Tagging (https://github.com/ansible-collections/community.general/pull/9833).
-
-Known Issues
-------------
-
-- reveal_ansible_type filter plugin and ansible_type test plugin - note that ansible-core's Data Tagging feature implements new aliases, such as ``_AnsibleTaggedStr`` for ``str``, ``_AnsibleTaggedInt`` for ``int``, and ``_AnsibleTaggedFloat`` for ``float`` (https://github.com/ansible-collections/community.general/pull/9833).
-
-v9.5.6
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Minor Changes
--------------
-
-- consul_token - fix idempotency when ``policies`` or ``roles`` are supplied by name (https://github.com/ansible-collections/community.general/issues/9841, https://github.com/ansible-collections/community.general/pull/9845).
-
-Bugfixes
---------
-
-- cloudlare_dns - handle exhausted response stream in case of HTTP errors to show nice error message to the user (https://github.com/ansible-collections/community.general/issues/9782, https://github.com/ansible-collections/community.general/pull/9818).
-- dnf_versionlock - add support for dnf5 (https://github.com/ansible-collections/community.general/issues/9556).
-- homebrew_cask - handle unusual brew version strings (https://github.com/ansible-collections/community.general/issues/8432, https://github.com/ansible-collections/community.general/pull/9881).
-- ipa_host - module revoked existing host certificates even if ``user_certificate`` was not given (https://github.com/ansible-collections/community.general/pull/9694).
-- nmcli - enable changing only the order of DNS servers or search suffixes (https://github.com/ansible-collections/community.general/issues/8724, https://github.com/ansible-collections/community.general/pull/9880).
-- proxmox_vm_info - the module no longer expects that the key ``template`` exists in a dictionary returned by Proxmox (https://github.com/ansible-collections/community.general/issues/9875, https://github.com/ansible-collections/community.general/pull/9910).
-- sudoers - display stdout and stderr raised while failed validation (https://github.com/ansible-collections/community.general/issues/9674, https://github.com/ansible-collections/community.general/pull/9871).
-
-v9.5.5
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Bugfixes
---------
-
-- apache2_mod_proxy - make compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/9762).
-- apache2_mod_proxy - passing the cluster's page as referer for the member's pages. This makes the module actually work again for halfway modern Apache versions. According to some comments founds on the net the referer was required since at least 2019 for some versions of Apache 2 (https://github.com/ansible-collections/community.general/pull/9762).
-- cloudflare_dns - fix crash when deleting a DNS record or when updating a record with ``solo=true`` (https://github.com/ansible-collections/community.general/issues/9652, https://github.com/ansible-collections/community.general/pull/9649).
-- elasticsearch_plugin - fix ``ERROR: D is not a recognized option`` issue when configuring proxy settings (https://github.com/ansible-collections/community.general/pull/9774, https://github.com/ansible-collections/community.general/issues/9773).
-- keycloak_client - fix and improve existing tests. The module showed a diff without actual changes, solved by improving the ``normalise_cr()`` function (https://github.com/ansible-collections/community.general/pull/9644).
-- keycloak_client - in check mode, detect whether the lists in before client (for example redirect URI list) contain items that the lists in the desired client do not contain (https://github.com/ansible-collections/community.general/pull/9739).
-- passwordstore lookup plugin - fix subkey creation even when ``create=false`` (https://github.com/ansible-collections/community.general/issues/9105, https://github.com/ansible-collections/community.general/pull/9106).
-- proxmox inventory plugin - plugin did not update cache correctly after ``meta: refresh_inventory`` (https://github.com/ansible-collections/community.general/issues/9710, https://github.com/ansible-collections/community.general/pull/9760).
-- redhat_subscription - use the "enable_content" option (when available) when
- registering using D-Bus, to ensure that subscription-manager enables the
- content on registration; this is particular important on EL 10+ and Fedora
- 41+
- (https://github.com/ansible-collections/community.general/pull/9778).
-- xml - ensure file descriptor is closed (https://github.com/ansible-collections/community.general/pull/9695).
-
-v9.5.4
-======
-
-Security Fixes
---------------
-
-- keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not show in the logs (https://github.com/ansible-collections/community.general/pull/9621).
-
-Bugfixes
---------
-
-- redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions)
- when unregistering a system: newer versions of subscription-manager, as
- available in EL 10 and Fedora 41+, do not support entitlements anymore, and
- thus unsubscribing will fail
- (https://github.com/ansible-collections/community.general/pull/9578).
-
-v9.5.3
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Minor Changes
--------------
-
-- proxmox module utils - add method ``api_task_complete`` that can wait for task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256).
-
-Security Fixes
---------------
-
-- keycloak_authentication - API calls did not properly set the ``priority`` during update resulting in incorrectly sorted authentication flows. This apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263).
-
-Bugfixes
---------
-
-- dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, https://github.com/ansible-collections/community.general/issues/9362).
-- htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, https://github.com/ansible-collections/community.general/pull/9490).
-- proxmox_disk - fix async method and make ``resize_disk`` method handle errors correctly (https://github.com/ansible-collections/community.general/pull/9256).
-- proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277).
-- qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334).
-- redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi system resource nodes (https://github.com/ansible-collections/community.general/pull/9234).
-
-v9.5.2
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Minor Changes
--------------
-
-- proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099).
-
-Bugfixes
---------
-
-- dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, https://github.com/ansible-collections/community.general/issues/8830).
-- dnf_config_manager - forces locale to ``C`` before module starts. If the locale was set to non-English, the output of the ``dnf config-manager`` could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, https://github.com/ansible-collections/community.general/issues/9046).
-- flatpak - force the locale language to ``C`` when running the flatpak command (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883).
-- github_key - in check mode, a faulty call to ```datetime.strftime(...)``` was being made which generated an exception (https://github.com/ansible-collections/community.general/issues/9185).
-- homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex (https://github.com/ansible-collections/community.general/pull/9128).
-- keycloak_client - fix diff by removing code that turns the attributes dict which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077).
-- keycloak_clientscope - fix diff and ``end_state`` by removing the code that turns the attributes dict, which contains additional config items, into a list (https://github.com/ansible-collections/community.general/pull/9082).
-- keycloak_clientscope_type - sort the default and optional clientscope lists to improve the diff (https://github.com/ansible-collections/community.general/pull/9202).
-- redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114).
-- slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198).
-
-v9.5.1
-======
-
-Release Summary
----------------
-
-Regular bugfix release.
-
-Minor Changes
--------------
-
-- redfish_utils module utils - schedule a BIOS configuration job at next reboot when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
-
-Bugfixes
---------
-
-- bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028).
-- collection_version lookup plugin - use ``importlib`` directly instead of the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib`` (https://github.com/ansible-collections/community.general/pull/9084).
-- gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010).
-- keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093).
-- keycloak_group - fix crash caused in subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979).
-- modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052).
-- one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907).
-- one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
-- one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
-- one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019).
-- redfish_utils module utils - fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057).
-
-v9.5.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Please note that this is the last feature release for community.general 9.x.y.
-From now on, new features will only go into community.general 10.x.y.
-
-Minor Changes
--------------
-
-- dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
-- flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909).
-- gio_mime - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855).
-- gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908).
-- gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
-- jira - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856).
-- keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973).
-- keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments (https://github.com/ansible-collections/community.general/pull/8954).
-- memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
-- nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897).
-- nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885).
-- one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719).
-- opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
-- passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
-- proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917).
-- redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653).
-- redfish_confg - remove ``CapacityBytes`` from required paramaters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956).
-- redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990).
-- redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937).
-- scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876).
-
-Deprecated Features
--------------------
-
-- hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919).
-
-Bugfixes
---------
-
-- cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948).
-- cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing parameter (https://github.com/ansible-collections/community.general/pull/8929).
-- dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970).
-- django_command - option ``command`` is now split lexically before passed to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944).
-- homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987).
-- ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920).
-- ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900).
-- java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877).
-- keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898).
-- keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785).
-- keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812).
-- keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923).
-- keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940).
-- lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888).
-- one_service - fix service creation after it was deleted with ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887).
-- pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- python_runner module utils - parameter ``path_prefix`` was being handled as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944).
-- udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987).
-
-New Modules
------------
-
-- community.general.ipa_getkeytab - Manage keytab file in FreeIPA.
-
-v9.4.0
-======
-
-Release Summary
----------------
-
-Bugfix and feature release.
-
-Minor Changes
--------------
-
-- MH module utils - add parameter ``when`` to ``cause_changes`` decorator (https://github.com/ansible-collections/community.general/pull/8766).
-- MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766).
-- alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository(https://github.com/ansible-collections/community.general/pull/8779).
-- credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760).
-- gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759).
-- hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791).
-- keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_user_federation - add module argument allowing users to opt out of the removal of unspecified mappers, for example to keep the keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809).
-- pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-
-Deprecated Features
--------------------
-
-- MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791).
-- pipx - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx_info - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-
-Bugfixes
---------
-
-- gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-- gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790).
-- gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790).
-- gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-- keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823).
-- keycloak_user_federation - fix key error when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762).
-- keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
-- keycloak_user_federation - sort desired and after mapper list by name (analogously to the before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761).
-- proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794).
-
-New Modules
------------
-
-- community.general.keycloak_userprofile - Allows managing Keycloak User Profiles.
-- community.general.one_vnet - Manages OpenNebula virtual networks.
-
-v9.3.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628).
-- chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626).
-- chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627).
-- cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625).
-- doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623).
-- gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711).
-- gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688).
-- locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682).
-- memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624).
-- opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741).
-- passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
-- pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656).
-- pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633).
-- proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713).
-- redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654).
-
-Bugfixes
---------
-
-- gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648).
-- homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708).
-- keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735).
-- keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695).
-- proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646).
-- proxmox - fixed an issue where volume strings were overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646).
-- proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646).
-
-New Modules
------------
-
-- community.general.bootc_manage - Bootc Switch and Upgrade.
-- community.general.homebrew_services - Services manager for Homebrew.
-- community.general.keycloak_realm_keys_metadata_info - Allows obtaining Keycloak realm keys metadata via Keycloak API.
-
-v9.2.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto`` which will automatically try and determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517).
-- proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542).
-- proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542).
-- proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516).
-- redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533).
-- sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214).
-- virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510).
-- wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444).
-
-Bugfixes
---------
-
-- bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557).
-- keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545).
-- nsupdate - fix 'index out of range' error when changing NS records by falling back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614).
-- proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542).
-- redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- community.general.reveal_ansible_type - Return input type.
-
-Test
-~~~~
-
-- community.general.ansible_type - Validate input type.
-
-v9.1.0
-======
-
-Release Summary
----------------
-
-Regular feature and bugfix release.
-
-Minor Changes
--------------
-
-- CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479).
-- ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351).
-- cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480).
-- cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415).
-- cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512).
-- ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404).
-- keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
-- openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
-- proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441).
-- proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471).
-- redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434).
-- redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434).
-- redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464).
-
-Deprecated Features
--------------------
-
-- CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479).
-- git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453).
-- git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453).
-
-Bugfixes
---------
-
-- git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452).
-- keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496).
-- launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406).
-- opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489).
-- opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430).
-- opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430).
-- paman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514).
-
-Known Issues
-------------
-
-- homectl - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8497).
-- udm_user - the module does not work under Python 3.13 or newer, since it relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8497).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- community.general.keep_keys - Keep specific keys from dictionaries in a list.
-- community.general.remove_keys - Remove specific keys from dictionaries in a list.
-- community.general.replace_keys - Replace specific keys in a list of dictionaries.
-
-New Modules
------------
-
-- community.general.consul_agent_check - Add, modify, and delete checks within a consul cluster.
-- community.general.consul_agent_service - Add, modify and delete services within a consul cluster.
-- community.general.django_check - Wrapper for C(django-admin check).
-- community.general.django_createcachetable - Wrapper for C(django-admin createcachetable).
-
-v9.0.1
-======
-
-Release Summary
----------------
-
-Bugfix release for inclusion in Ansible 10.0.0rc1.
-
-Minor Changes
--------------
-
-- ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413).
-
-Bugfixes
---------
-
-- cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044).
-- hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403).
-- locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-
-v9.0.0
-======
-
-Release Summary
----------------
-
-This is release 9.0.0 of ``community.general``, released on 2024-05-20.
-
-Minor Changes
--------------
-
-- PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289).
-- Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead of offset-naive UTC timestamps, which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222).
-- aix_lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- apt_rpm - add new states ``latest`` and ``present_not_latest``. The value ``latest`` is equivalent to the current behavior of ``present``, which will upgrade a package if a newer version exists. ``present_not_latest`` does what most users would expect ``present`` to do: it does not upgrade if the package is already installed. The current behavior of ``present`` will be deprecated in a later version, and eventually changed to that of ``present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247).
-- apt_rpm - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- bitwarden lookup plugin - add ``bw_session`` option, to pass session key instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994).
-- bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188).
-- bitwarden lookup plugin - allows to fetch all records of a given collection ID, by allowing to pass an empty value for ``search_value`` when ``collection_id`` is provided (https://github.com/ansible-collections/community.general/pull/8013).
-- bitwarden lookup plugin - when looking for items using an item ID, the item is now accessed directly with ``bw get item`` instead of searching through all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468).
-- btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- cmd_runner module_utils - add validation for minimum and maximum length in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288).
-- consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session, consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897).
-- consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878).
-- consul_policy, consul_role, consul_session - removed dependency on ``requests`` and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826, https://github.com/ansible-collections/community.general/pull/7878).
-- consul_role - ``node_identities`` now expects a ``node_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878).
-- consul_role - ``service_identities`` now expects a ``service_name`` option to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878).
-- consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878).
-- consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878).
-- elastic callback plugin - close elastic client to not leak resources (https://github.com/ansible-collections/community.general/pull/7517).
-- filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126).
-- gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639, https://github.com/ansible-collections/community.general/pull/8337).
-- gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-- git_config - allow multiple git configs for the same name with the new ``add_mode`` option (https://github.com/ansible-collections/community.general/pull/7260).
-- git_config - the ``after`` and ``before`` fields in the ``diff`` of the return value can be a list instead of a string in case more configs with the same key are affected (https://github.com/ansible-collections/community.general/pull/7260).
-- git_config - when a value is unset, all configs with the same key are unset (https://github.com/ansible-collections/community.general/pull/7260).
-- gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472).
-- gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486).
-- gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook, gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable, gitlab_user - improve API pagination and compatibility with different versions of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790).
-- gitlab_hook - adds ``releases_events`` parameter for supporting Releases events triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956).
-- gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199).
-- homebrew - adds ``force_formula`` parameter to disambiguate a formula from a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274).
-- homebrew, homebrew_cask - refactor common argument validation logic into a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323, https://github.com/ansible-collections/community.general/pull/8324).
-- icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``, and ``password`` parameters (https://github.com/ansible-collections/community.general/issues/7074, https://github.com/ansible-collections/community.general/pull/7996).
-- icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998).
-- ini_file - add an optional parameter ``section_has_values``. If the target ini file contains more than one ``section``, use ``section_has_values`` to specify which one should be updated (https://github.com/ansible-collections/community.general/pull/7505).
-- ini_file - support optional spaces between section names and their surrounding brackets (https://github.com/ansible-collections/community.general/pull/8075).
-- installp - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- ipa_config - adds ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7588).
-- ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737).
-- ipa_pwpolicy - refactor module and exchange a sequence ``if`` statements with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723).
-- ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``, ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password policies (https://github.com/ansible-collections/community.general/pull/7723).
-- ipa_sudorule - adds options to include denied commands or command groups (https://github.com/ansible-collections/community.general/pull/7415).
-- ipa_user - adds ``idp`` and ``passkey`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7589).
-- irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``, while keeping ``use_ssl`` as an alias. The default value for ``validate_certs`` is ``false`` for backwards compatibility. We recommend to every user of this module to explicitly set ``use_tls=true`` and ``validate_certs=true`` whenever possible, especially when communicating to IRC servers over the internet (https://github.com/ansible-collections/community.general/pull/7550).
-- java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153).
-- java_cert - enable ``owner``, ``group``, ``mode``, and other generic file arguments (https://github.com/ansible-collections/community.general/pull/8116).
-- kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-- keycloak module utils - expose error message from Keycloak server for HTTP errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645).
-- keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2`` protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215, https://github.com/ansible-collections/community.general/pull/8216).
-- keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional key algorithms (https://github.com/ansible-collections/community.general/pull/7698).
-- keycloak_realm_key - the ``config.certificate`` option value is no longer defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698).
-- keycloak_realm_key - the ``provider_id`` option now supports RSA encryption key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698).
-- keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538).
-- keycloak_user_federation - allow custom user storage providers to be set through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789).
-- ldap_attrs - module now supports diff mode, showing which attributes are changed within an operation (https://github.com/ansible-collections/community.general/pull/8073).
-- lvg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676, https://github.com/ansible-collections/community.general/issues/7504).
-- lvol - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- lxd connection plugin - tighten the detection logic for lxd ``Instance not found`` errors, to avoid false detection on unrelated errors such as ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521).
-- lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint (https://github.com/ansible-collections/community.general/pull/7980).
-- macports - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- mail - add ``Message-ID`` header; which is required by some mail servers (https://github.com/ansible-collections/community.general/pull/7740).
-- mail module, mail callback plugin - allow to configure the domain name of the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765).
-- mssql_script - adds transactional (rollback/commit) support via optional boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976).
-- netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489).
-- nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572).
-- nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``, ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port`` (https://github.com/ansible-collections/community.general/pull/8154).
-- nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569).
-- nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7499).
-- nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118).
-- onepassword lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116).
-- onepassword_raw lookup plugin - support 1Password Connect with the opv2 client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116).
-- opentelemetry - add support for HTTP trace_exporter and configures the behavior via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8321).
-- opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE`` (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363).
-- opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-- osx_defaults - add option ``check_types`` to enable changing the type of existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173).
-- parted - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify the stored password format (https://github.com/ansible-collections/community.general/pull/7426).
-- passwordstore lookup - add ``missing_subkey`` parameter defining the behavior of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166).
-- pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-- pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379, https://github.com/ansible-collections/community.general/pull/8382).
-- pkgin - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- portage - adds the possibility to explicitly tell portage to write packages to world file (https://github.com/ansible-collections/community.general/issues/6226, https://github.com/ansible-collections/community.general/pull/8236).
-- portinstall - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- proxmox - adds ``startup`` parameters to configure startup order, startup delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038).
-- proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion of container to a template (https://github.com/ansible-collections/community.general/pull/7143).
-- proxmox - adds ``update`` parameter, allowing update of an already existing container's configuration (https://github.com/ansible-collections/community.general/pull/7540).
-- proxmox inventory plugin - adds an option to exclude nodes from the dynamic inventory generation. The new setting is optional, not using this option will behave as usual (https://github.com/ansible-collections/community.general/issues/6714, https://github.com/ansible-collections/community.general/pull/7461).
-- proxmox* modules - there is now a ``community.general.proxmox`` module defaults group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334).
-- proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495).
-- proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843).
-- proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143).
-- proxmox_kvm - adds ``usb`` parameter for setting USB devices on proxmox KVM VMs (https://github.com/ansible-collections/community.general/pull/8199).
-- proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600).
-- proxmox_ostype - it is now possible to specify the ``ostype`` when creating an LXC container (https://github.com/ansible-collections/community.general/pull/7462).
-- proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485).
-- puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282).
-- redfish_command - add command ``ResetToDefaults`` to reset manager to default state (https://github.com/ansible-collections/community.general/issues/8163).
-- redfish_config - add command ``SetServiceIdentification`` to set service identification (https://github.com/ansible-collections/community.general/issues/7916).
-- redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities`` (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195).
-- redfish_info - add command ``GetServiceIdentification`` to get service identification (https://github.com/ansible-collections/community.general/issues/7882).
-- redfish_info - adding the ``BootProgress`` property when getting ``Systems`` info (https://github.com/ansible-collections/community.general/pull/7626).
-- revbitspss lookup plugin - removed a redundant unicode prefix. The prefix was not necessary for Python 3 and has been cleaned up to streamline the code (https://github.com/ansible-collections/community.general/pull/8087).
-- rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684).
-- slackpkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist`` parameters (https://github.com/ansible-collections/community.general/pull/7456).
-- ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking`` (https://github.com/ansible-collections/community.general/pull/8257).
-- ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7703).
-- ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or ``no`` (https://github.com/ansible-collections/community.general/pull/7704).
-- sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983).
-- svr4pkg - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- swdepot - refactor module to pass list of arguments to ``module.run_command()`` instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
-- terraform - add support for ``diff_mode`` for terraform resource_changes (https://github.com/ansible-collections/community.general/pull/7896).
-- terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes`` does not exist (https://github.com/ansible-collections/community.general/pull/7963).
-- xcc_redfish_command - added support for raw POSTs (``command=PostResource`` in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746).
-- xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-- xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- cpanm - the default of the ``mode`` option changed from ``compatibility`` to ``new`` (https://github.com/ansible-collections/community.general/pull/8198).
-- django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198).
-- django_manage - the module will now fail if ``virtualenv`` is specified but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198).
-- redfish_command, redfish_config, redfish_info - change the default for ``timeout`` from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198).
-
-Deprecated Features
--------------------
-
-- MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr`` in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin`` (https://github.com/ansible-collections/community.general/pull/8280).
-- ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using the ``VarDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226).
-- ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict`` and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226).
-- apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated and will change in community.general 11.0.0. Right now the module will upgrade a package to the latest version if one of these two states is used. You should explicitly use ``state=latest`` if you want this behavior, and switch to ``state=present_not_latest`` if you do not want to upgrade the package if it is already installed. In community.general 11.0.0 the behavior of ``state=present`` and ``state=installed`` will change to that of ``state=present_not_latest`` (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8285).
-- consul_acl - the module has been deprecated and will be removed in community.general 10.0.0. ``consul_token`` and ``consul_policy`` can be used instead (https://github.com/ansible-collections/community.general/pull/7901).
-- django_manage - the ``ack_venv_creation_deprecation`` option has no more effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198).
-- gitlab modules - the basic auth method on GitLab API have been deprecated and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383).
-- hipchat callback plugin - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The callback plugin is therefore deprecated and will be removed from community.general 10.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/issues/8184, https://github.com/ansible-collections/community.general/pull/8189).
-- irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have been deprecated and will change to ``true`` in community.general 10.0.0 to improve security. You can already improve security now by explicitly setting them to ``true``. Specifying values now disables the deprecation warning (https://github.com/ansible-collections/community.general/pull/7578).
-
-Removed Features (previously deprecated)
-----------------------------------------
-
-- The deprecated redirects for internal module names have been removed. These internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm`` that redirect to the short FQCN ``community.general.apt_rpm``. They were originally needed to implement flatmapping; as various tooling started to recommend users to use the long names flatmapping was removed from the collection and redirects were added for users who already followed these incorrect recommendations (https://github.com/ansible-collections/community.general/pull/7835).
-- ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211`` options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198).
-- cloudflare_dns - remove support for SPF records. These are no longer supported by CloudFlare (https://github.com/ansible-collections/community.general/pull/7782).
-- django_manage - support for the ``command`` values ``cleanup``, ``syncdb``, and ``validate`` were removed. Use ``clearsessions``, ``migrate``, and ``check`` instead, respectively (https://github.com/ansible-collections/community.general/pull/8198).
-- flowdock - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
-- mh.mixins.deps module utils - the ``DependencyMixin`` has been removed. Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198).
-- proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198).
-- rax* modules, rax module utils, rax docs fragment - the Rackspace modules relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
-- redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools`` have been removed (https://github.com/ansible-collections/community.general/pull/8198).
-- redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach`` option was removed (https://github.com/ansible-collections/community.general/pull/8198).
-- stackdriver - this module relied on HTTPS APIs that do not exist anymore and was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
-- webfaction_* modules - these modules relied on HTTPS APIs that do not exist anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
-
-Security Fixes
---------------
-
-- cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula, proxmox, scaleway, stackpath_compute, virtualbox, and xen_orchestra inventory plugin - make sure all data received from the remote servers is marked as unsafe, so remote code execution by obtaining texts that can be evaluated as templates is not possible (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, https://github.com/ansible-collections/community.general/pull/8098).
-- keycloak_identity_provider - the client secret was not correctly sanitized by the module. The return values ``proposed``, ``existing``, and ``end_state``, as well as the diff, did contain the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355).
-
-Bugfixes
---------
-
-- aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd`` (https://github.com/ansible-collections/community.general/issues/8151).
-- aix_filesystem - fix issue with empty list items in crfs logic and option order (https://github.com/ansible-collections/community.general/pull/8052).
-- apt_rpm - the module did not upgrade packages if a newer version exists. Now the package will be reinstalled if the candidate is newer than the installed version (https://github.com/ansible-collections/community.general/issues/7414).
-- apt_rpm - when checking whether packages were installed after running ``apt-get -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263).
-- bitwarden_secrets_manager lookup plugin - implements retry with exponential backoff to avoid lookup errors when Bitwarden's API rate limiting is encountered (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238).
-- cargo - fix idempotency issues when using a custom installation path for packages (using the ``--path`` parameter). The initial installation runs fine, but subsequent runs use the ``get_installed()`` function which did not check the given installation location, before running ``cargo install``. This resulted in a false ``changed`` state. Also the removal of packages using ``state: absent`` failed, as the installation check did not use the given parameter (https://github.com/ansible-collections/community.general/pull/7970).
-- cloudflare_dns - fix Cloudflare lookup of SSHFP records (https://github.com/ansible-collections/community.general/issues/7652).
-- consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091).
-- from_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185).
-- gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
-- gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846).
-- gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158).
-- gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
-- gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467).
-- gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790).
-- gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467).
-- haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092).
-- homebrew - detect already installed formulae and casks using JSON output from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864).
-- homebrew - error returned from brew command was ignored and tried to parse empty JSON. Fix now checks for an error and raises it to give accurate error message to users (https://github.com/ansible-collections/community.general/issues/8047).
-- incus connection plugin - treats ``inventory_hostname`` as a variable instead of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874).
-- interface_files - also consider ``address_family`` when changing ``option=method`` (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612).
-- inventory plugins - add unsafe wrapper to avoid marking strings that do not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX (https://github.com/ansible-collections/community.general/issues/8212, https://github.com/ansible-collections/community.general/pull/8225).
-- ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175).
-- ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880).
-- ipa_otptoken - the module expects ``ipatokendisabled`` as string but the ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795).
-- ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880).
-- iptables_state - fix idempotency issues when restoring incomplete iptables dumps (https://github.com/ansible-collections/community.general/issues/8029).
-- irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542).
-- keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641).
-- keycloak_authz_permission - resource payload variable for scope-based permission was constructed as a string, when it needs to be a list, even for a single item (https://github.com/ansible-collections/community.general/issues/7151).
-- keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes`` to normalizations (https://github.com/ansible-collections/community.general/pull/8223).
-- keycloak_client - fixes issue when metadata is provided in desired state when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226, https://github.com/ansible-collections/community.general/pull/7881).
-- keycloak_identity_provider - ``mappers`` processing was not idempotent if the mappers configuration list had not been sorted by name (in ascending order). Fix resolves the issue by sorting mappers in the desired state using the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418).
-- keycloak_identity_provider - it was not possible to reconfigure (add, remove) ``mappers`` once they were created initially. Removal was ignored, adding new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418).
-- keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales`` (https://github.com/ansible-collections/community.general/pull/8224).
-- keycloak_user - when ``force`` is set, but user does not exist, do not try to delete it (https://github.com/ansible-collections/community.general/pull/7696).
-- keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320).
-- ldap - previously the order number (if present) was expected to follow an equals sign in the DN. This makes it so the order number string is identified correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646).
-- linode inventory plugin - add descriptive error message for linode inventory plugin (https://github.com/ansible-collections/community.general/pull/8133).
-- log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542).
-- lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601, https://github.com/ansible-collections/community.general/issues/7182).
-- merge_variables lookup plugin - fixing cross host merge: providing access to foreign hosts variables to the perspective of the host that is performing the merge (https://github.com/ansible-collections/community.general/pull/8303).
-- modprobe - listing modules files or modprobe files could trigger a FileNotFoundError if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant functions now return empty lists if the directories do not exist to avoid crashing the module (https://github.com/ansible-collections/community.general/issues/7717).
-- mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818, https://github.com/ansible-collections/community.general/pull/7821).
-- nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389).
-- ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()`` calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501).
-- onepassword lookup plugin - failed for fields that were in sections and had uppercase letters in the label/ID. Field lookups are now case insensitive in all cases (https://github.com/ansible-collections/community.general/pull/7919).
-- onepassword lookup plugin - field and section titles are now case insensitive when using op CLI version two or later. This matches the behavior of version one (https://github.com/ansible-collections/community.general/pull/7564).
-- opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367).
-- opentelemetry callback plugin - honour the ``disable_logs`` option to avoid storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373).
-- pacemaker_cluster - actually implement check mode, which the module claims to support. This means that until now the module also did changes in check mode (https://github.com/ansible-collections/community.general/pull/8081).
-- pam_limits - when the file does not exist, do not create it in check mode (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057).
-- pipx module utils - change the CLI argument formatter for the ``pip_args`` parameter (https://github.com/ansible-collections/community.general/issues/7497, https://github.com/ansible-collections/community.general/pull/7506).
-- pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971).
-- proxmox - fix updating a container config if the setting does not already exist (https://github.com/ansible-collections/community.general/pull/7872).
-- proxmox_kvm - fixed status check getting from node-specific API endpoint (https://github.com/ansible-collections/community.general/issues/7817).
-- proxmox_kvm - running ``state=template`` will first check whether VM is already a template (https://github.com/ansible-collections/community.general/pull/7792).
-- proxmox_pool_member - absent state for type VM did not delete VMs from the pools (https://github.com/ansible-collections/community.general/pull/7464).
-- puppet - add option ``environment_lang`` to set the environment language encoding. Defaults to lang ``C``. It is recommended to set it to ``C.UTF-8`` or ``en_US.UTF-8`` depending on what is available on your system. (https://github.com/ansible-collections/community.general/issues/8000)
-- redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate`` commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465, https://github.com/ansible-collections/community.general/pull/7471).
-- redfish_info - allow for a GET operation invoked by ``GetUpdateStatus`` to allow for an empty response body for cases where a service returns 204 No Content (https://github.com/ansible-collections/community.general/issues/8003).
-- redfish_info - correct uncaught exception when attempting to retrieve ``Chassis`` information (https://github.com/ansible-collections/community.general/pull/7952).
-- redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4 and
- greater; older versions of RHEL 7 do not have it
- (https://github.com/ansible-collections/community.general/issues/7622,
- https://github.com/ansible-collections/community.general/pull/7624).
-- riak - support ``riak admin`` sub-command in newer Riak KV versions beside the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211).
-- statusio_maintenance - fix error caused by incorrectly formed API data payload. Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754).
-- terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535).
-- to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183, https://github.com/ansible-collections/community.general/pull/8185).
-- xml - make module work with lxml 5.1.1, which removed some internals that the module was relying on (https://github.com/ansible-collections/community.general/pull/8169).
-
-New Plugins
------------
-
-Become
-~~~~~~
-
-- community.general.run0 - Systemd's run0.
-
-Callback
-~~~~~~~~
-
-- community.general.default_without_diff - The default ansible callback without diff output.
-- community.general.timestamp - Adds simple timestamp for each header.
-
-Connection
-~~~~~~~~~~
-
-- community.general.incus - Run tasks in Incus instances via the Incus CLI.
-
-Filter
-~~~~~~
-
-- community.general.from_ini - Converts INI text input into a dictionary.
-- community.general.lists_difference - Difference of lists with a predictive order.
-- community.general.lists_intersect - Intersection of lists with a predictive order.
-- community.general.lists_symmetric_difference - Symmetric Difference of lists with a predictive order.
-- community.general.lists_union - Union of lists with a predictive order.
-- community.general.to_ini - Converts a dictionary to the INI file format.
-
-Lookup
-~~~~~~
-
-- community.general.github_app_access_token - Obtain short-lived Github App Access tokens.
-- community.general.onepassword_doc - Fetch documents stored in 1Password.
-
-Test
-~~~~
-
-- community.general.fqdn_valid - Validates fully-qualified domain names against RFC 1123.
-
-New Modules
------------
-
-- community.general.consul_acl_bootstrap - Bootstrap ACLs in Consul.
-- community.general.consul_auth_method - Manipulate Consul auth methods.
-- community.general.consul_binding_rule - Manipulate Consul binding rules.
-- community.general.consul_token - Manipulate Consul tokens.
-- community.general.django_command - Run Django admin commands.
-- community.general.dnf_config_manager - Enable or disable dnf repositories using config-manager.
-- community.general.git_config_info - Read git configuration.
-- community.general.gitlab_group_access_token - Manages GitLab group access tokens.
-- community.general.gitlab_issue - Create, update, or delete GitLab issues.
-- community.general.gitlab_label - Creates/updates/deletes GitLab Labels belonging to project or group.
-- community.general.gitlab_milestone - Creates/updates/deletes GitLab Milestones belonging to project or group.
-- community.general.gitlab_project_access_token - Manages GitLab project access tokens.
-- community.general.keycloak_client_rolescope - Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications.
-- community.general.keycloak_component_info - Retrive component info in Keycloak.
-- community.general.keycloak_realm_rolemapping - Allows administration of Keycloak realm role mappings into groups with the Keycloak API.
-- community.general.nomad_token - Manage Nomad ACL tokens.
-- community.general.proxmox_node_info - Retrieve information about one or more Proxmox VE nodes.
-- community.general.proxmox_storage_contents_info - List content from a Proxmox VE storage.
-- community.general.usb_facts - Allows listing information about USB devices.
+This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments
+under ``changelogs/fragments``. On release branches once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/README.md b/README.md
index b0a1b6197d..dbfc8c0f07 100644
--- a/README.md
+++ b/README.md
@@ -6,10 +6,10 @@ SPDX-License-Identifier: GPL-3.0-or-later
# Community General Collection
-[](https://docs.ansible.com/ansible/latest/collections/community/general/)
-[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
-[](https://github.com/ansible-collections/community.general/actions)
-[](https://github.com/ansible-collections/community.general/actions)
+[](https://docs.ansible.com/ansible/devel/collections/community/general/)
+[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[](https://github.com/ansible-collections/community.general/actions)
+[](https://github.com/ansible-collections/community.general/actions)
[](https://codecov.io/gh/ansible-collections/community.general)
[](https://api.reuse.software/info/github.com/ansible-collections/community.general)
@@ -39,7 +39,7 @@ For more information about communication, see the [Ansible communication guide](
## Tested with Ansible
-Tested with the current ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18, and ansible-core 2.19 releases. Ansible-core versions before 2.13.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
+Tested with the current ansible-core 2.16, ansible-core 2.17, ansible-core 2.18, ansible-core 2.19 releases and the current development version of ansible-core. Ansible-core versions before 2.16.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
## External requirements
@@ -118,7 +118,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma
## Release notes
-See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-9/CHANGELOG.md).
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md).
## Roadmap
@@ -137,8 +137,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues
This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.
-See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-9/COPYING) for the full text.
+See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text.
-Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-9/LICENSES/PSF-2.0.txt).
+Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt).
All files have a machine readable `SDPX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
diff --git a/antsibull-nox.toml b/antsibull-nox.toml
index 45a3431ec9..c631d3a3af 100644
--- a/antsibull-nox.toml
+++ b/antsibull-nox.toml
@@ -8,24 +8,39 @@
"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main"
"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main"
-[collection_sources_per_ansible.'2.13']
-"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
-
-[collection_sources_per_ansible.'2.14']
-"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
-
-[collection_sources_per_ansible.'2.15']
-# community.crypto's main branch needs ansible-core >= 2.17
-"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
-
[collection_sources_per_ansible.'2.16']
# community.crypto's main branch needs ansible-core >= 2.17
"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
[sessions]
+[sessions.lint]
+run_isort = false
+run_black = false
+run_flake8 = false
+run_pylint = false
+run_yamllint = true
+yamllint_config = ".yamllint"
+# yamllint_config_plugins = ".yamllint-docs"
+# yamllint_config_plugins_examples = ".yamllint-examples"
+run_mypy = false
+
[sessions.docs_check]
validate_collection_refs="all"
+codeblocks_restrict_types = [
+ "ansible-output",
+ "console",
+ "ini",
+ "json",
+ "python",
+ "shell",
+ "yaml",
+ "yaml+jinja",
+ "text",
+]
+codeblocks_restrict_type_exact_case = true
+codeblocks_allow_without_type = false
+codeblocks_allow_literal_blocks = false
[sessions.license_check]
@@ -34,30 +49,41 @@ run_no_unwanted_files = true
no_unwanted_files_module_extensions = [".py"]
no_unwanted_files_yaml_extensions = [".yml"]
run_action_groups = true
+run_no_trailing_whitespace = true
+no_trailing_whitespace_skip_paths = [
+ "tests/integration/targets/iso_extract/files/test.iso",
+ "tests/integration/targets/java_cert/files/testpkcs.p12",
+ "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
+]
+no_trailing_whitespace_skip_directories = [
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/",
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/",
+]
[[sessions.extra_checks.action_groups_config]]
name = "consul"
pattern = "^consul_.*$"
exclusions = [
- "consul_acl",
"consul_acl_bootstrap",
"consul_kv",
]
doc_fragment = "community.general.consul.actiongroup_consul"
[[sessions.extra_checks.action_groups_config]]
-name = "proxmox"
-pattern = "^proxmox(_.*)?$"
-exclusions = []
-doc_fragment = "community.general.proxmox.actiongroup_proxmox"
+name = "keycloak"
+pattern = "^keycloak_.*$"
+exclusions = [
+ "keycloak_realm_info",
+]
+doc_fragment = "community.general.keycloak.actiongroup_keycloak"
[sessions.build_import_check]
run_galaxy_importer = true
[sessions.ansible_test_sanity]
-include_devel = false
-max_version = "2.19"
+include_devel = true
[sessions.ansible_test_units]
-include_devel = false
-max_version = "2.19"
+include_devel = true
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index 94ad901fe6..f8129d5d73 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -1,1741 +1,3 @@
---
-ancestor: 8.0.0
-releases:
- 9.0.0:
- changes:
- breaking_changes:
- - cpanm - the default of the ``mode`` option changed from ``compatibility``
- to ``new`` (https://github.com/ansible-collections/community.general/pull/8198).
- - django_manage - the module now requires Django >= 4.1 (https://github.com/ansible-collections/community.general/pull/8198).
- - django_manage - the module will now fail if ``virtualenv`` is specified
- but no virtual environment exists at that location (https://github.com/ansible-collections/community.general/pull/8198).
- - redfish_command, redfish_config, redfish_info - change the default for ``timeout``
- from 10 to 60 (https://github.com/ansible-collections/community.general/pull/8198).
- bugfixes:
- - aix_filesystem - fix ``_validate_vg`` not passing VG name to ``lsvg_cmd``
- (https://github.com/ansible-collections/community.general/issues/8151).
- - aix_filesystem - fix issue with empty list items in crfs logic and option
- order (https://github.com/ansible-collections/community.general/pull/8052).
- - apt-rpm - the module did not upgrade packages if a newer version exists.
- Now the package will be reinstalled if the candidate is newer than the installed
- version (https://github.com/ansible-collections/community.general/issues/7414).
- - apt_rpm - when checking whether packages were installed after running ``apt-get
- -y install ``, only the last package name was checked (https://github.com/ansible-collections/community.general/pull/8263).
- - bitwarden_secrets_manager lookup plugin - implements retry with exponential
- backoff to avoid lookup errors when Bitwardn's API rate limiting is encountered
- (https://github.com/ansible-collections/community.general/issues/8230, https://github.com/ansible-collections/community.general/pull/8238).
- - 'cargo - fix idempotency issues when using a custom installation path for
- packages (using the ``--path`` parameter). The initial installation runs
- fine, but subsequent runs use the ``get_installed()`` function which did
- not check the given installation location, before running ``cargo install``.
- This resulted in a false ``changed`` state. Also the removal of packeges
- using ``state: absent`` failed, as the installation check did not use the
- given parameter (https://github.com/ansible-collections/community.general/pull/7970).'
- - cloudflare_dns - fix Cloudflare lookup of SHFP records (https://github.com/ansible-collections/community.general/issues/7652).
- - consul_token - fix token creation without ``accessor_id`` (https://github.com/ansible-collections/community.general/pull/8091).
- - from_ini filter plugin - disabling interpolation of ``ConfigParser`` to
- allow converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183,
- https://github.com/ansible-collections/community.general/pull/8185).
- - gitlab_group_members - fix gitlab constants call in ``gitlab_group_members``
- module (https://github.com/ansible-collections/community.general/issues/7467).
- - gitlab_issue - fix behavior to search GitLab issue, using ``search`` keyword
- instead of ``title`` (https://github.com/ansible-collections/community.general/issues/7846).
- - gitlab_issue, gitlab_label, gitlab_milestone - avoid crash during version
- comparison when the python-gitlab Python module is not installed (https://github.com/ansible-collections/community.general/pull/8158).
- - gitlab_project_members - fix gitlab constants call in ``gitlab_project_members``
- module (https://github.com/ansible-collections/community.general/issues/7467).
- - gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches``
- module (https://github.com/ansible-collections/community.general/issues/7467).
- - gitlab_runner - fix pagination when checking for existing runners (https://github.com/ansible-collections/community.general/pull/7790).
- - gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467).
- - haproxy - fix an issue where HAProxy could get stuck in DRAIN mode when
- the backend was unreachable (https://github.com/ansible-collections/community.general/issues/8092).
- - homebrew - detect already installed formulae and casks using JSON output
- from ``brew info`` (https://github.com/ansible-collections/community.general/issues/864).
- - homebrew - error returned from brew command was ignored and tried to parse
- empty JSON. Fix now checks for an error and raises it to give accurate error
- message to users (https://github.com/ansible-collections/community.general/issues/8047).
- - incus connection plugin - treats ``inventory_hostname`` as a variable instead
- of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874).
- - interface_files - also consider ``address_family`` when changing ``option=method``
- (https://github.com/ansible-collections/community.general/issues/7610, https://github.com/ansible-collections/community.general/pull/7612).
- - inventory plugins - add unsafe wrapper to avoid marking strings that do
- not contain ``{`` or ``}`` as unsafe, to work around a bug in AWX ((https://github.com/ansible-collections/community.general/issues/8212,
- https://github.com/ansible-collections/community.general/pull/8225).
- - ipa - fix get version regex in IPA module_utils (https://github.com/ansible-collections/community.general/pull/8175).
- - ipa_hbacrule - the module uses a string for ``ipaenabledflag`` for new FreeIPA
- versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880).
- - ipa_otptoken - the module expect ``ipatokendisabled`` as string but the
- ``ipatokendisabled`` value is returned as a boolean (https://github.com/ansible-collections/community.general/pull/7795).
- - ipa_sudorule - the module uses a string for ``ipaenabledflag`` for new FreeIPA
- versions while the returned value is a boolean (https://github.com/ansible-collections/community.general/pull/7880).
- - iptables_state - fix idempotency issues when restoring incomplete iptables
- dumps (https://github.com/ansible-collections/community.general/issues/8029).
- - irc - replace ``ssl.wrap_socket`` that was removed from Python 3.12 with
- code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542).
- - keycloak_* - fix Keycloak API client to quote ``/`` properly (https://github.com/ansible-collections/community.general/pull/7641).
- - keycloak_authz_permission - resource payload variable for scope-based permission
- was constructed as a string, when it needs to be a list, even for a single
- item (https://github.com/ansible-collections/community.general/issues/7151).
- - keycloak_client - add sorted ``defaultClientScopes`` and ``optionalClientScopes``
- to normalizations (https://github.com/ansible-collections/community.general/pull/8223).
- - keycloak_client - fixes issue when metadata is provided in desired state
- when task is in check mode (https://github.com/ansible-collections/community.general/issues/1226,
- https://github.com/ansible-collections/community.general/pull/7881).
- - keycloak_identity_provider - ``mappers`` processing was not idempotent if
- the mappers configuration list had not been sorted by name (in ascending
- order). Fix resolves the issue by sorting mappers in the desired state using
- the same key which is used for obtaining existing state (https://github.com/ansible-collections/community.general/pull/7418).
- - keycloak_identity_provider - it was not possible to reconfigure (add, remove)
- ``mappers`` once they were created initially. Removal was ignored, adding
- new ones resulted in dropping the pre-existing unmodified mappers. Fix resolves
- the issue by supplying correct input to the internal update call (https://github.com/ansible-collections/community.general/pull/7418).
- - keycloak_realm - add normalizations for ``enabledEventTypes`` and ``supportedLocales``
- (https://github.com/ansible-collections/community.general/pull/8224).
- - keycloak_user - when ``force`` is set, but user does not exist, do not try
- to delete it (https://github.com/ansible-collections/community.general/pull/7696).
- - keycloak_user_federation - fix diff of empty ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/8320).
- - ldap - previously the order number (if present) was expected to follow an
- equals sign in the DN. This makes it so the order number string is identified
- correctly anywhere within the DN (https://github.com/ansible-collections/community.general/issues/7646).
- - linode inventory plugin - add descriptive error message for linode inventory
- plugin (https://github.com/ansible-collections/community.general/pull/8133).
- - log_entries callback plugin - replace ``ssl.wrap_socket`` that was removed
- from Python 3.12 with code for creating a proper SSL context (https://github.com/ansible-collections/community.general/pull/7542).
- - lvol - test for output messages in both ``stdout`` and ``stderr`` (https://github.com/ansible-collections/community.general/pull/7601,
- https://github.com/ansible-collections/community.general/issues/7182).
- - 'merge_variables lookup plugin - fixing cross host merge: providing access
- to foreign hosts variables to the perspective of the host that is performing
- the merge (https://github.com/ansible-collections/community.general/pull/8303).'
- - modprobe - listing modules files or modprobe files could trigger a FileNotFoundError
- if ``/etc/modprobe.d`` or ``/etc/modules-load.d`` did not exist. Relevant
- functions now return empty lists if the directories do not exist to avoid
- crashing the module (https://github.com/ansible-collections/community.general/issues/7717).
- - mssql_script - make the module work with Python 2 (https://github.com/ansible-collections/community.general/issues/7818,
- https://github.com/ansible-collections/community.general/pull/7821).
- - nmcli - fix ``connection.slave-type`` wired to ``bond`` and not with parameter
- ``slave_type`` in case of connection type ``wifi`` (https://github.com/ansible-collections/community.general/issues/7389).
- - ocapi_utils, oci_utils, redfish_utils module utils - replace ``type()``
- calls with ``isinstance()`` calls (https://github.com/ansible-collections/community.general/pull/7501).
- - onepassword lookup plugin - failed for fields that were in sections and
- had uppercase letters in the label/ID. Field lookups are now case insensitive
- in all cases (https://github.com/ansible-collections/community.general/pull/7919).
- - onepassword lookup plugin - field and section titles are now case insensitive
- when using op CLI version two or later. This matches the behavior of version
- one (https://github.com/ansible-collections/community.general/pull/7564).
- - opentelemetry callback plugin - close spans always (https://github.com/ansible-collections/community.general/pull/8367).
- - opentelemetry callback plugin - honour the ``disable_logs`` option to avoid
- storing task results since they are not used regardless (https://github.com/ansible-collections/community.general/pull/8373).
- - pacemaker_cluster - actually implement check mode, which the module claims
- to support. This means that until now the module also did changes in check
- mode (https://github.com/ansible-collections/community.general/pull/8081).
- - pam_limits - when the file does not exist, do not create it in check mode
- (https://github.com/ansible-collections/community.general/issues/8050, https://github.com/ansible-collections/community.general/pull/8057).
- - pipx module utils - change the CLI argument formatter for the ``pip_args``
- parameter (https://github.com/ansible-collections/community.general/issues/7497,
- https://github.com/ansible-collections/community.general/pull/7506).
- - pkgin - pkgin (pkgsrc package manager used by SmartOS) raises erratic exceptions
- and spurious ``changed=true`` (https://github.com/ansible-collections/community.general/pull/7971).
- - proxmox - fix updating a container config if the setting does not already
- exist (https://github.com/ansible-collections/community.general/pull/7872).
- - proxmox_kvm - fixed status check getting from node-specific API endpoint
- (https://github.com/ansible-collections/community.general/issues/7817).
- - proxmox_kvm - running ``state=template`` will first check whether VM is
- already a template (https://github.com/ansible-collections/community.general/pull/7792).
- - proxmox_pool_member - absent state for type VM did not delete VMs from the
- pools (https://github.com/ansible-collections/community.general/pull/7464).
- - puppet - add option ``environment_lang`` to set the environment language
- encoding. Defaults to lang ``C``. It is recommended to set it to ``C.UTF-8``
- or ``en_US.UTF-8`` depending on what is available on your system. (https://github.com/ansible-collections/community.general/issues/8000)
- - redfish_command - fix usage of message parsing in ``SimpleUpdate`` and ``MultipartHTTPPushUpdate``
- commands to treat the lack of a ``MessageId`` as no message (https://github.com/ansible-collections/community.general/issues/7465,
- https://github.com/ansible-collections/community.general/pull/7471).
- - redfish_info - allow for a GET operation invoked by ``GetUpdateStatus``
- to allow for an empty response body for cases where a service returns 204
- No Content (https://github.com/ansible-collections/community.general/issues/8003).
- - redfish_info - correct uncaught exception when attempting to retrieve ``Chassis``
- information (https://github.com/ansible-collections/community.general/pull/7952).
- - 'redhat_subscription - use the D-Bus registration on RHEL 7 only on 7.4
- and
-
- greater; older versions of RHEL 7 do not have it
-
- (https://github.com/ansible-collections/community.general/issues/7622,
-
- https://github.com/ansible-collections/community.general/pull/7624).
-
- '
- - riak - support ``riak admin`` sub-command in newer Riak KV versions beside
- the legacy ``riak-admin`` main command (https://github.com/ansible-collections/community.general/pull/8211).
- - statusio_maintenance - fix error caused by incorrectly formed API data payload.
- Was raising "Failed to create maintenance HTTP Error 400 Bad Request" caused
- by bad data type for date/time and deprecated dict keys (https://github.com/ansible-collections/community.general/pull/7754).
- - terraform - fix multiline string handling in complex variables (https://github.com/ansible-collections/community.general/pull/7535).
- - to_ini filter plugin - disabling interpolation of ``ConfigParser`` to allow
- converting values with a ``%`` sign (https://github.com/ansible-collections/community.general/issues/8183,
- https://github.com/ansible-collections/community.general/pull/8185).
- - xml - make module work with lxml 5.1.1, which removed some internals that
- the module was relying on (https://github.com/ansible-collections/community.general/pull/8169).
- deprecated_features:
- - MH DependencyCtxMgr module_utils - deprecate ``module_utils.mh.mixin.deps.DependencyCtxMgr``
- in favour of ``module_utils.deps`` (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.AnsibleModule``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.DependencyCtxMgr``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.StateMixin``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarDict,``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarMeta``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate ``plugins.module_utils.module_helper.VarsMixin``
- (https://github.com/ansible-collections/community.general/pull/8280).
- - ModuleHelper module_utils - deprecate use of ``VarsMixin`` in favor of using
- the ``VardDict`` module_utils (https://github.com/ansible-collections/community.general/pull/8226).
- - ModuleHelper vars module_utils - bump deprecation of ``VarMeta``, ``VarDict``
- and ``VarsMixin`` to version 11.0.0 (https://github.com/ansible-collections/community.general/pull/8226).
- - apt_rpm - the behavior of ``state=present`` and ``state=installed`` is deprecated
- and will change in community.general 11.0.0. Right now the module will upgrade
- a package to the latest version if one of these two states is used. You
- should explicitly use ``state=latest`` if you want this behavior, and switch
- to ``state=present_not_latest`` if you do not want to upgrade the package
- if it is already installed. In community.general 11.0.0 the behavior of
- ``state=present`` and ``state=installed`` will change to that of ``state=present_not_latest``
- (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8285).
- - consul_acl - the module has been deprecated and will be removed in community.general
- 10.0.0. ``consul_token`` and ``consul_policy`` can be used instead (https://github.com/ansible-collections/community.general/pull/7901).
- - django_manage - the ``ack_venv_creation_deprecation`` option has no more
- effect and will be removed from community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8198).
- - gitlab modules - the basic auth method on GitLab API have been deprecated
- and will be removed in community.general 10.0.0 (https://github.com/ansible-collections/community.general/pull/8383).
- - hipchat callback plugin - the hipchat service has been discontinued and
- the self-hosted variant has been End of Life since 2020. The callback plugin
- is therefore deprecated and will be removed from community.general 10.0.0
- if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/issues/8184,
- https://github.com/ansible-collections/community.general/pull/8189).
- - irc - the defaults ``false`` for ``use_tls`` and ``validate_certs`` have
- been deprecated and will change to ``true`` in community.general 10.0.0
- to improve security. You can already improve security now by explicitly
- setting them to ``true``. Specifying values now disables the deprecation
- warning (https://github.com/ansible-collections/community.general/pull/7578).
- minor_changes:
- - PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python
- scripts (https://github.com/ansible-collections/community.general/pull/8289).
- - Use offset-aware ``datetime.datetime`` objects (with timezone UTC) instead
- of offset-naive UTC timestamps, which are deprecated in Python 3.12 (https://github.com/ansible-collections/community.general/pull/8222).
- - aix_lvol - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - 'apt_rpm - add new states ``latest`` and ``present_not_latest``. The value
- ``latest`` is equivalent to the current behavior of ``present``, which will
- upgrade a package if a newer version exists. ``present_not_latest`` does
- what most users would expect ``present`` to do: it does not upgrade if the
- package is already installed. The current behavior of ``present`` will be
- deprecated in a later version, and eventually changed to that of ``present_not_latest``
- (https://github.com/ansible-collections/community.general/issues/8217, https://github.com/ansible-collections/community.general/pull/8247).'
- - apt_rpm - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - bitwarden lookup plugin - add ``bw_session`` option, to pass session key
- instead of reading from env (https://github.com/ansible-collections/community.general/pull/7994).
- - bitwarden lookup plugin - add support to filter by organization ID (https://github.com/ansible-collections/community.general/pull/8188).
- - bitwarden lookup plugin - allows to fetch all records of a given collection
- ID, by allowing to pass an empty value for ``search_value`` when ``collection_id``
- is provided (https://github.com/ansible-collections/community.general/pull/8013).
- - bitwarden lookup plugin - when looking for items using an item ID, the item
- is now accessed directly with ``bw get item`` instead of searching through
- all items. This doubles the lookup speed (https://github.com/ansible-collections/community.general/pull/7468).
- - btrfs_subvolume - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - cmd_runner module_utils - add validation for minimum and maximum length
- in the value passed to ``cmd_runner_fmt.as_list()`` (https://github.com/ansible-collections/community.general/pull/8288).
- - consul_auth_method, consul_binding_rule, consul_policy, consul_role, consul_session,
- consul_token - added action group ``community.general.consul`` (https://github.com/ansible-collections/community.general/pull/7897).
- - consul_policy - added support for diff and check mode (https://github.com/ansible-collections/community.general/pull/7878).
- - consul_policy, consul_role, consul_session - removed dependency on ``requests``
- and factored out common parts (https://github.com/ansible-collections/community.general/pull/7826,
- https://github.com/ansible-collections/community.general/pull/7878).
- - consul_role - ``node_identities`` now expects a ``node_name`` option to
- match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878).
- - consul_role - ``service_identities`` now expects a ``service_name`` option
- to match the Consul API, the old ``name`` is still supported as alias (https://github.com/ansible-collections/community.general/pull/7878).
- - consul_role - added support for diff mode (https://github.com/ansible-collections/community.general/pull/7878).
- - consul_role - added support for templated policies (https://github.com/ansible-collections/community.general/pull/7878).
- - elastic callback plugin - close elastic client to not leak resources (https://github.com/ansible-collections/community.general/pull/7517).
- - filesystem - add bcachefs support (https://github.com/ansible-collections/community.general/pull/8126).
- - gandi_livedns - adds support for personal access tokens (https://github.com/ansible-collections/community.general/issues/7639,
- https://github.com/ansible-collections/community.general/pull/8337).
- - gconftool2 - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- - git_config - allow multiple git configs for the same name with the new ``add_mode``
- option (https://github.com/ansible-collections/community.general/pull/7260).
- - git_config - the ``after`` and ``before`` fields in the ``diff`` of the
- return value can be a list instead of a string in case more configs with
- the same key are affected (https://github.com/ansible-collections/community.general/pull/7260).
- - git_config - when a value is unset, all configs with the same key are unset
- (https://github.com/ansible-collections/community.general/pull/7260).
- - gitlab modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/7472).
- - gitlab modules - remove duplicate ``gitlab`` package check (https://github.com/ansible-collections/community.general/pull/7486).
- - gitlab_deploy_key, gitlab_group_members, gitlab_group_variable, gitlab_hook,
- gitlab_instance_variable, gitlab_project_badge, gitlab_project_variable,
- gitlab_user - improve API pagination and compatibility with different versions
- of ``python-gitlab`` (https://github.com/ansible-collections/community.general/pull/7790).
- - gitlab_hook - adds ``releases_events`` parameter for supporting Releases
- events triggers on GitLab hooks (https://github.com/ansible-collections/community.general/pull/7956).
- - gitlab_runner - add support for new runner creation workflow (https://github.com/ansible-collections/community.general/pull/7199).
- - homebrew - adds ``force_formula`` parameter to disambiguate a formula from
- a cask of the same name (https://github.com/ansible-collections/community.general/issues/8274).
- - homebrew, homebrew_cask - refactor common argument validation logic into
- a dedicated ``homebrew`` module utils (https://github.com/ansible-collections/community.general/issues/8323,
- https://github.com/ansible-collections/community.general/pull/8324).
- - icinga2 inventory plugin - add Jinja2 templating support to ``url``, ``user``,
- and ``password`` paramenters (https://github.com/ansible-collections/community.general/issues/7074,
- https://github.com/ansible-collections/community.general/pull/7996).
- - icinga2 inventory plugin - adds new parameter ``group_by_hostgroups`` in
- order to make grouping by Icinga2 hostgroups optional (https://github.com/ansible-collections/community.general/pull/7998).
- - ini_file - add an optional parameter ``section_has_values``. If the target
- ini file contains more than one ``section``, use ``section_has_values``
- to specify which one should be updated (https://github.com/ansible-collections/community.general/pull/7505).
- - ini_file - support optional spaces between section names and their surrounding
- brackets (https://github.com/ansible-collections/community.general/pull/8075).
- - installp - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - ipa_config - adds ``passkey`` choice to ``ipauserauthtype`` parameter's
- choices (https://github.com/ansible-collections/community.general/pull/7588).
- - ipa_dnsrecord - adds ability to manage NS record types (https://github.com/ansible-collections/community.general/pull/7737).
- - ipa_pwpolicy - refactor module and exchange a sequence ``if`` statements
- with a ``for`` loop (https://github.com/ansible-collections/community.general/pull/7723).
- - ipa_pwpolicy - update module to support ``maxrepeat``, ``maxsequence``,
- ``dictcheck``, ``usercheck``, ``gracelimit`` parameters in FreeIPA password
- policies (https://github.com/ansible-collections/community.general/pull/7723).
- - ipa_sudorule - adds options to include denied commands or command groups
- (https://github.com/ansible-collections/community.general/pull/7415).
- - ipa_user - adds ``idp`` and ``passkey`` choice to ``ipauserauthtype`` parameter's
- choices (https://github.com/ansible-collections/community.general/pull/7589).
- - irc - add ``validate_certs`` option, and rename ``use_ssl`` to ``use_tls``,
- while keeping ``use_ssl`` as an alias. The default value for ``validate_certs``
- is ``false`` for backwards compatibility. We recommend to every user of
- this module to explicitly set ``use_tls=true`` and `validate_certs=true``
- whenever possible, especially when communicating to IRC servers over the
- internet (https://github.com/ansible-collections/community.general/pull/7550).
- - java_cert - add ``cert_content`` argument (https://github.com/ansible-collections/community.general/pull/8153).
- - java_cert - enable ``owner``, ``group``, ``mode``, and other generic file
- arguments (https://github.com/ansible-collections/community.general/pull/8116).
- - kernel_blacklist - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- - keycloak module utils - expose error message from Keycloak server for HTTP
- errors in some specific situations (https://github.com/ansible-collections/community.general/pull/7645).
- - keycloak_client, keycloak_clientscope, keycloak_clienttemplate - added ``docker-v2``
- protocol support, enhancing alignment with Keycloak's protocol options (https://github.com/ansible-collections/community.general/issues/8215,
- https://github.com/ansible-collections/community.general/pull/8216).
- - keycloak_realm_key - the ``config.algorithm`` option now supports 8 additional
- key algorithms (https://github.com/ansible-collections/community.general/pull/7698).
- - keycloak_realm_key - the ``config.certificate`` option value is no longer
- defined with ``no_log=True`` (https://github.com/ansible-collections/community.general/pull/7698).
- - keycloak_realm_key - the ``provider_id`` option now supports RSA encryption
- key usage (value ``rsa-enc``) (https://github.com/ansible-collections/community.general/pull/7698).
- - keycloak_user_federation - add option for ``krbPrincipalAttribute`` (https://github.com/ansible-collections/community.general/pull/7538).
- - keycloak_user_federation - allow custom user storage providers to be set
- through ``provider_id`` (https://github.com/ansible-collections/community.general/pull/7789).
- - ldap_attrs - module now supports diff mode, showing which attributes are
- changed within an operation (https://github.com/ansible-collections/community.general/pull/8073).
- - lvg - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - lvol - change ``pvs`` argument type to list of strings (https://github.com/ansible-collections/community.general/pull/7676,
- https://github.com/ansible-collections/community.general/issues/7504).
- - lvol - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - 'lxd connection plugin - tighten the detection logic for lxd ``Instance
- not found`` errors, to avoid false detection on unrelated errors such as
- ``/usr/bin/python3: not found`` (https://github.com/ansible-collections/community.general/pull/7521).'
- - lxd_container - uses ``/1.0/instances`` API endpoint, if available. Falls
- back to ``/1.0/containers`` or ``/1.0/virtual-machines``. Fixes issue when
- using Incus or LXD 5.19 due to migrating to ``/1.0/instances`` endpoint
- (https://github.com/ansible-collections/community.general/pull/7980).
- - macports - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - mail - add ``Message-ID`` header; which is required by some mail servers
- (https://github.com/ansible-collections/community.general/pull/7740).
- - mail module, mail callback plugin - allow to configure the domain name of
- the Message-ID header with a new ``message_id_domain`` option (https://github.com/ansible-collections/community.general/pull/7765).
- - mssql_script - adds transactional (rollback/commit) support via optional
- boolean param ``transaction`` (https://github.com/ansible-collections/community.general/pull/7976).
- - netcup_dns - adds support for record types ``OPENPGPKEY``, ``SMIMEA``, and
- ``SSHFP`` (https://github.com/ansible-collections/community.general/pull/7489).
- - nmcli - add support for new connection type ``loopback`` (https://github.com/ansible-collections/community.general/issues/6572).
- - nmcli - adds OpenvSwitch support with new ``type`` values ``ovs-port``,
- ``ovs-interface``, and ``ovs-bridge``, and new ``slave_type`` value ``ovs-port``
- (https://github.com/ansible-collections/community.general/pull/8154).
- - nmcli - allow for ``infiniband`` slaves of ``bond`` interface types (https://github.com/ansible-collections/community.general/pull/7569).
- - nmcli - allow for the setting of ``MTU`` for ``infiniband`` and ``bond``
- interface types (https://github.com/ansible-collections/community.general/pull/7499).
- - nmcli - allow setting ``MTU`` for ``bond-slave`` interface types (https://github.com/ansible-collections/community.general/pull/8118).
- - onepassword lookup plugin - support 1Password Connect with the opv2 client
- by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116).
- - onepassword_raw lookup plugin - support 1Password Connect with the opv2
- client by setting the connect_host and connect_token parameters (https://github.com/ansible-collections/community.general/pull/7116).
- - opentelemetry - add support for HTTP trace_exporter and configures the behavior
- via ``OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`` (https://github.com/ansible-collections/community.general/issues/7888,
- https://github.com/ansible-collections/community.general/pull/8321).
- - opentelemetry - add support for exporting spans in a file via ``ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE``
- (https://github.com/ansible-collections/community.general/issues/7888, https://github.com/ansible-collections/community.general/pull/8363).
- - opkg - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- - osx_defaults - add option ``check_types`` to enable changing the type of
- existing defaults on the fly (https://github.com/ansible-collections/community.general/pull/8173).
- - parted - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - passwordstore - adds ``timestamp`` and ``preserve`` parameters to modify
- the stored password format (https://github.com/ansible-collections/community.general/pull/7426).
- - passwordstore lookup - add ``missing_subkey`` parameter defining the behavior
- of the lookup when a passwordstore subkey is missing (https://github.com/ansible-collections/community.general/pull/8166).
- - pipx - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- - pkg5 - add support for non-silent execution (https://github.com/ansible-collections/community.general/issues/8379,
- https://github.com/ansible-collections/community.general/pull/8382).
- - pkgin - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - portage - adds the possibility to explicitly tell portage to write packages
- to world file (https://github.com/ansible-collections/community.general/issues/6226,
- https://github.com/ansible-collections/community.general/pull/8236).
- - portinstall - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - proxmox - adds ``startup`` parameters to configure startup order, startup
- delay and shutdown delay (https://github.com/ansible-collections/community.general/pull/8038).
- - proxmox - adds ``template`` value to the ``state`` parameter, allowing conversion
- of container to a template (https://github.com/ansible-collections/community.general/pull/7143).
- - proxmox - adds ``update`` parameter, allowing update of an already existing
- containers configuration (https://github.com/ansible-collections/community.general/pull/7540).
- - proxmox inventory plugin - adds an option to exclude nodes from the dynamic
- inventory generation. The new setting is optional, not using this option
- will behave as usual (https://github.com/ansible-collections/community.general/issues/6714,
- https://github.com/ansible-collections/community.general/pull/7461).
- - proxmox* modules - there is now a ``community.general.proxmox`` module defaults
- group that can be used to set default options for all Proxmox modules (https://github.com/ansible-collections/community.general/pull/8334).
- - proxmox_disk - add ability to manipulate CD-ROM drive (https://github.com/ansible-collections/community.general/pull/7495).
- - proxmox_kvm - add parameter ``update_unsafe`` to avoid limitations when
- updating dangerous values (https://github.com/ansible-collections/community.general/pull/7843).
- - proxmox_kvm - adds ``template`` value to the ``state`` parameter, allowing
- conversion of a VM to a template (https://github.com/ansible-collections/community.general/pull/7143).
- - proxmox_kvm - adds ``usb`` parameter for setting USB devices on proxmox KVM
- VMs (https://github.com/ansible-collections/community.general/pull/8199).
- - proxmox_kvm - support the ``hookscript`` parameter (https://github.com/ansible-collections/community.general/issues/7600).
- - proxmox_ostype - it is now possible to specify the ``ostype`` when creating
- an LXC container (https://github.com/ansible-collections/community.general/pull/7462).
- - proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485).
- - puppet - new feature to set ``--waitforlock`` option (https://github.com/ansible-collections/community.general/pull/8282).
- - redfish_command - add command ``ResetToDefaults`` to reset manager to default
- state (https://github.com/ansible-collections/community.general/issues/8163).
- - redfish_config - add command ``SetServiceIdentification`` to set service
- identification (https://github.com/ansible-collections/community.general/issues/7916).
- - redfish_info - add boolean return value ``MultipartHttpPush`` to ``GetFirmwareUpdateCapabilities``
- (https://github.com/ansible-collections/community.general/issues/8194, https://github.com/ansible-collections/community.general/pull/8195).
- - redfish_info - add command ``GetServiceIdentification`` to get service identification
- (https://github.com/ansible-collections/community.general/issues/7882).
- - redfish_info - adding the ``BootProgress`` property when getting ``Systems``
- info (https://github.com/ansible-collections/community.general/pull/7626).
- - revbitspss lookup plugin - removed a redundant unicode prefix. The prefix
- was not necessary for Python 3 and has been cleaned up to streamline the
- code (https://github.com/ansible-collections/community.general/pull/8087).
- - rundeck module utils - allow to pass ``Content-Type`` to API requests (https://github.com/ansible-collections/community.general/pull/7684).
- - slackpkg - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - ssh_config - adds ``controlmaster``, ``controlpath`` and ``controlpersist``
- parameters (https://github.com/ansible-collections/community.general/pull/7456).
- - ssh_config - allow ``accept-new`` as valid value for ``strict_host_key_checking``
- (https://github.com/ansible-collections/community.general/pull/8257).
- - ssh_config - new feature to set ``AddKeysToAgent`` option to ``yes`` or
- ``no`` (https://github.com/ansible-collections/community.general/pull/7703).
- - ssh_config - new feature to set ``IdentitiesOnly`` option to ``yes`` or
- ``no`` (https://github.com/ansible-collections/community.general/pull/7704).
- - sudoers - add support for the ``NOEXEC`` tag in sudoers rules (https://github.com/ansible-collections/community.general/pull/7983).
- - svr4pkg - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - swdepot - refactor module to pass list of arguments to ``module.run_command()``
- instead of relying on interpretation by a shell (https://github.com/ansible-collections/community.general/pull/8264).
- - terraform - add support for ``diff_mode`` for terraform resource_changes
- (https://github.com/ansible-collections/community.general/pull/7896).
- - terraform - fix ``diff_mode`` in state ``absent`` and when terraform ``resource_changes``
- does not exist (https://github.com/ansible-collections/community.general/pull/7963).
- - xcc_redfish_command - added support for raw POSTs (``command=PostResource``
- in ``category=Raw``) without a specific action info (https://github.com/ansible-collections/community.general/pull/7746).
- - xfconf - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- - xfconf_info - use ``ModuleHelper`` with ``VarDict`` (https://github.com/ansible-collections/community.general/pull/8226).
- release_summary: This is release 9.0.0 of ``community.general``, released on
- 2024-05-20.
- removed_features:
- - The deprecated redirects for internal module names have been removed. These
- internal redirects were extra-long FQCNs like ``community.general.packaging.os.apt_rpm``
- that redirect to the short FQCN ``community.general.apt_rpm``. They were
- originally needed to implement flatmapping; as various tooling started to
- recommend users to use the long names flatmapping was removed from the collection
- and redirects were added for users who already followed these incorrect
- recommendations (https://github.com/ansible-collections/community.general/pull/7835).
- - ansible_galaxy_install - the ``ack_ansible29`` and ``ack_min_ansiblecore211``
- options have been removed. They no longer had any effect (https://github.com/ansible-collections/community.general/pull/8198).
- - cloudflare_dns - remove support for SPF records. These are no longer supported
- by CloudFlare (https://github.com/ansible-collections/community.general/pull/7782).
- - django_manage - support for the ``command`` values ``cleanup``, ``syncdb``,
- and ``validate`` were removed. Use ``clearsessions``, ``migrate``, and ``check``
- instead, respectively (https://github.com/ansible-collections/community.general/pull/8198).
- - flowdock - this module relied on HTTPS APIs that do not exist anymore and
- was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
- - mh.mixins.deps module utils - the ``DependencyMixin`` has been removed.
- Use the ``deps`` module utils instead (https://github.com/ansible-collections/community.general/pull/8198).
- - proxmox - the ``proxmox_default_behavior`` option has been removed (https://github.com/ansible-collections/community.general/pull/8198).
- - rax* modules, rax module utils, rax docs fragment - the Rackspace modules
- relied on the deprecated package ``pyrax`` and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
- - redhat module utils - the classes ``Rhsm``, ``RhsmPool``, and ``RhsmPools``
- have been removed (https://github.com/ansible-collections/community.general/pull/8198).
- - redhat_subscription - the alias ``autosubscribe`` of the ``auto_attach``
- option was removed (https://github.com/ansible-collections/community.general/pull/8198).
- - stackdriver - this module relied on HTTPS APIs that do not exist anymore
- and was thus removed (https://github.com/ansible-collections/community.general/pull/8198).
- - webfaction_* modules - these modules relied on HTTPS APIs that do not exist
- anymore and were thus removed (https://github.com/ansible-collections/community.general/pull/8198).
- security_fixes:
- - cobbler, gitlab_runners, icinga2, linode, lxd, nmap, online, opennebula,
- proxmox, scaleway, stackpath_compute, virtualbox, and xen_orchestra inventory
- plugin - make sure all data received from the remote servers is marked as
- unsafe, so remote code execution by obtaining texts that can be evaluated
- as templates is not possible (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/,
- https://github.com/ansible-collections/community.general/pull/8098).
- - keycloak_identity_provider - the client secret was not correctly sanitized
- by the module. The return values ``proposed``, ``existing``, and ``end_state``,
- as well as the diff, did contain the client secret unmasked (https://github.com/ansible-collections/community.general/pull/8355).
- fragments:
- - 000-redhat_subscription-dbus-on-7.4-plus.yaml
- - 5588-support-1password-connect.yml
- - 6572-nmcli-add-support-loopback-type.yml
- - 7143-proxmox-template.yml
- - 7151-fix-keycloak_authz_permission-incorrect-resource-payload.yml
- - 7199-gitlab-runner-new-creation-workflow.yml
- - 7242-multi-values-for-same-name-in-git-config.yml
- - 7389-nmcli-issue-with-creating-a-wifi-bridge-slave.yml
- - 7418-kc_identity_provider-mapper-reconfiguration-fixes.yml
- - 7426-add-timestamp-and-preserve-options-for-passwordstore.yaml
- - 7456-add-ssh-control-master.yml
- - 7461-proxmox-inventory-add-exclude-nodes.yaml
- - 7462-Add-ostype-parameter-in-LXC-container-clone-of-ProxmoxVE.yaml
- - 7464-fix-vm-removal-in-proxmox_pool_member.yml
- - 7465-redfish-firmware-update-message-id-hardening.yml
- - 7467-fix-gitlab-constants-calls.yml
- - 7472-gitlab-add-ca-path-option.yml
- - 7485-proxmox_vm_info-config.yml
- - 7486-gitlab-refactor-package-check.yml
- - 7489-netcup-dns-record-types.yml
- - 7495-proxmox_disk-manipulate-cdrom.yml
- - 7499-allow-mtu-setting-on-bond-and-infiniband-interfaces.yml
- - 7501-type.yml
- - 7505-ini_file-section_has.yml
- - 7506-pipx-pipargs.yml
- - 7517-elastic-close-client.yaml
- - 7535-terraform-fix-multiline-string-handling-in-complex-variables.yml
- - 7538-add-krbprincipalattribute-option.yml
- - 7540-proxmox-update-config.yml
- - 7542-irc-logentries-ssl.yml
- - 7550-irc-use_tls-validate_certs.yml
- - 7564-onepassword-lookup-case-insensitive.yaml
- - 7569-infiniband-slave-support.yml
- - 7577-fix-apt_rpm-module.yml
- - 7578-irc-tls.yml
- - 7588-ipa-config-new-choice-passkey-to-ipauserauthtype.yml
- - 7589-ipa-config-new-choices-idp-and-passkey-to-ipauserauthtype.yml
- - 7600-proxmox_kvm-hookscript.yml
- - 7601-lvol-fix.yml
- - 7612-interface_file-method.yml
- - 7626-redfish-info-add-boot-progress-property.yml
- - 7641-fix-keycloak-api-client-to-quote-properly.yml
- - 7645-Keycloak-print-error-msg-from-server.yml
- - 7646-fix-order-number-detection-in-dn.yml
- - 7653-fix-cloudflare-lookup.yml
- - 7676-lvol-pvs-as-list.yml
- - 7683-added-contenttype-parameter.yml
- - 7696-avoid-attempt-to-delete-non-existing-user.yml
- - 7698-improvements-to-keycloak_realm_key.yml
- - 7703-ssh_config_add_keys_to_agent_option.yml
- - 7704-ssh_config_identities_only_option.yml
- - 7717-prevent-modprobe-error.yml
- - 7723-ipa-pwpolicy-update-pwpolicy-module.yml
- - 7737-add-ipa-dnsrecord-ns-type.yml
- - 7740-add-message-id-header-to-mail-module.yml
- - 7746-raw_post-without-actions.yml
- - 7754-fixed-payload-format.yml
- - 7765-mail-message-id.yml
- - 7782-cloudflare_dns-spf.yml
- - 7789-keycloak-user-federation-custom-provider-type.yml
- - 7790-gitlab-runner-api-pagination.yml
- - 7791-proxmox_kvm-state-template-will-check-status-first.yaml
- - 7797-ipa-fix-otp-idempotency.yml
- - 7821-mssql_script-py2.yml
- - 7826-consul-modules-refactoring.yaml
- - 7843-proxmox_kvm-update_unsafe.yml
- - 7847-gitlab-issue-title.yml
- - 7870-homebrew-cask-installed-detection.yml
- - 7872-proxmox_fix-update-if-setting-doesnt-exist.yaml
- - 7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml
- - 7880-ipa-fix-sudo-and-hbcalrule-idempotence.yml
- - 7881-fix-keycloak-client-ckeckmode.yml
- - 7882-add-redfish-get-service-identification.yml
- - 7896-add-terraform-diff-mode.yml
- - 7897-consul-action-group.yaml
- - 7901-consul-acl-deprecation.yaml
- - 7916-add-redfish-set-service-identification.yml
- - 7919-onepassword-fieldname-casing.yaml
- - 7951-fix-redfish_info-exception.yml
- - 7953-proxmox_kvm-fix_status_check.yml
- - 7956-adding-releases_events-option-to-gitlab_hook-module.yaml
- - 7963-fix-terraform-diff-absent.yml
- - 7970-fix-cargo-path-idempotency.yaml
- - 7976-add-mssql_script-transactional-support.yml
- - 7983-sudoers-add-support-noexec.yml
- - 7994-bitwarden-session-arg.yaml
- - 7996-add-templating-support-to-icinga2-inventory.yml
- - 7998-icinga2-inventory-group_by_hostgroups-parameter.yml
- - 8003-redfish-get-update-status-empty-response.yml
- - 8013-bitwarden-full-collection-item-list.yaml
- - 8029-iptables-state-restore-check-mode.yml
- - 8038-proxmox-startup.yml
- - 8048-fix-homebrew-module-error-reporting-on-become-true.yaml
- - 8057-pam_limits-check-mode.yml
- - 8073-ldap-attrs-diff.yml
- - 8075-optional-space-around-section-names.yaml
- - 8087-removed-redundant-unicode-prefixes.yml
- - 8091-consul-token-fixes.yaml
- - 8100-haproxy-drain-fails-on-down-backend.yml
- - 8116-java_cert-enable-owner-group-mode-args.yml
- - 8118-fix-bond-slave-honoring-mtu.yml
- - 8126-filesystem-bcachefs-support.yaml
- - 8133-add-error-message-for-linode-inventory-plugin.yaml
- - 8151-fix-lsvg_cmd-failed.yml
- - 8153-java_cert-add-cert_content-arg.yml
- - 8154-add-ovs-commands-to-nmcli-module.yml
- - 8158-gitlab-version-check.yml
- - 8163-redfish-implementing-reset-to-defaults.yml
- - 8166-password-store-lookup-missing-subkey.yml
- - 8169-lxml.yml
- - 8173-osx_defaults-check_type.yml
- - 8175-get_ipa_version_regex.yml
- - 8183-from_ini_to_ini.yml
- - 8188-bitwarden-add-organization_id.yml
- - 8194-redfish-add-multipart-to-capabilities.yml
- - 8199-added-usb-support-to-proxmox-module.yml
- - 8211-riak-admin-sub-command-support.yml
- - 8215-add-docker-v2-protocol.yml
- - 8222-datetime.yml
- - 8223-keycloak_client-additional-normalizations.yaml
- - 8224-keycloak_realm-add-normalizations.yaml
- - 8225-unsafe.yml
- - 8226-mh-vardict.yml
- - 8236-portage-select-feature.yml
- - 8238-bitwarden-secrets-manager-rate-limit-retry-with-backoff.yml
- - 8247-apt_rpm-latest.yml
- - 8257-ssh-config-hostkey-support-accept-new.yaml
- - 8263-apt_rpm-install-check.yml
- - 8264-run_command.yml
- - 8274-homebrew-force-formula.yml
- - 8280-mh-deprecations.yml
- - 8281-puppet-waitforlock.yaml
- - 8285-apt_rpm-state-deprecate.yml
- - 8288-cmdrunner-fmt-list-len-limits.yml
- - 8289-python-runner.yml
- - 8290-gandi-livedns-personal-access-token.yml
- - 8303-fix-rendering-foreign-variables.yaml
- - 8320-keycloak_user_federation-fix-diff-krbPrincipalAttribute.yaml
- - 8321-fix-opentelemetry-callback.yml
- - 8323-refactor-homebrew-logic-module-utils.yml
- - 8334-proxmox-action-group.yml
- - 8355-keycloak-idp-sanitize.yaml
- - 8363-opentelemetry-export-to-a-file.yml
- - 8367-fix-close-span-if-no-logs.yaml
- - 8373-honour-disable-logs.yaml
- - 8379-verbose-mode-pkg5.yml
- - 8383-deprecate-gitlab-basic-auth.yml
- - 9.0.0.yml
- - add-ipa-sudorule-deny-cmd.yml
- - aix_filesystem-crfs-issue.yml
- - bitwarden-lookup-performance.yaml
- - hipchat.yml
- - internal-redirects.yml
- - inventory-rce.yml
- - lxd-instance-not-found-avoid-false-positives.yml
- - lxd-instances-api-endpoint-added.yml
- - pacemaker-cluster.yml
- - pkgin.yml
- - puppet_lang_force.yml
- - remove_deprecated.yml
- modules:
- - description: Bootstrap ACLs in Consul.
- name: consul_acl_bootstrap
- namespace: ''
- - description: Manipulate Consul auth methods.
- name: consul_auth_method
- namespace: ''
- - description: Manipulate Consul binding rules.
- name: consul_binding_rule
- namespace: ''
- - description: Manipulate Consul tokens.
- name: consul_token
- namespace: ''
- - description: Run Django admin commands.
- name: django_command
- namespace: ''
- - description: Enable or disable dnf repositories using config-manager.
- name: dnf_config_manager
- namespace: ''
- - description: Read git configuration.
- name: git_config_info
- namespace: ''
- - description: Manages GitLab group access tokens.
- name: gitlab_group_access_token
- namespace: ''
- - description: Create, update, or delete GitLab issues.
- name: gitlab_issue
- namespace: ''
- - description: Creates/updates/deletes GitLab Labels belonging to project or
- group.
- name: gitlab_label
- namespace: ''
- - description: Creates/updates/deletes GitLab Milestones belonging to project
- or group.
- name: gitlab_milestone
- namespace: ''
- - description: Manages GitLab project access tokens.
- name: gitlab_project_access_token
- namespace: ''
- - description: Allows administration of Keycloak client roles scope to restrict
- the usage of certain roles to other specific client applications.
- name: keycloak_client_rolescope
- namespace: ''
- - description: Retrieve component info in Keycloak.
- name: keycloak_component_info
- namespace: ''
- - description: Allows administration of Keycloak realm role mappings into groups
- with the Keycloak API.
- name: keycloak_realm_rolemapping
- namespace: ''
- - description: Manage Nomad ACL tokens.
- name: nomad_token
- namespace: ''
- - description: Retrieve information about one or more Proxmox VE nodes.
- name: proxmox_node_info
- namespace: ''
- - description: List content from a Proxmox VE storage.
- name: proxmox_storage_contents_info
- namespace: ''
- - description: Allows listing information about USB devices.
- name: usb_facts
- namespace: ''
- plugins:
- become:
- - description: Systemd's run0.
- name: run0
- namespace: null
- callback:
- - description: The default ansible callback without diff output.
- name: default_without_diff
- namespace: null
- - description: Adds simple timestamp for each header.
- name: timestamp
- namespace: null
- connection:
- - description: Run tasks in Incus instances via the Incus CLI.
- name: incus
- namespace: null
- filter:
- - description: Converts INI text input into a dictionary.
- name: from_ini
- namespace: null
- - description: Difference of lists with a predictive order.
- name: lists_difference
- namespace: null
- - description: Intersection of lists with a predictive order.
- name: lists_intersect
- namespace: null
- - description: Symmetric Difference of lists with a predictive order.
- name: lists_symmetric_difference
- namespace: null
- - description: Union of lists with a predictive order.
- name: lists_union
- namespace: null
- - description: Converts a dictionary to the INI file format.
- name: to_ini
- namespace: null
- lookup:
- - description: Obtain short-lived Github App Access tokens.
- name: github_app_access_token
- namespace: null
- - description: Fetch documents stored in 1Password.
- name: onepassword_doc
- namespace: null
- test:
- - description: Validates fully-qualified domain names against RFC 1123.
- name: fqdn_valid
- namespace: null
- release_date: '2024-05-20'
- 9.0.1:
- changes:
- bugfixes:
- - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - django module utils - use new ``VarDict`` to prevent deprecation warning
- (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
- - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406,
- https://github.com/ansible-collections/community.general/issues/7044).
- - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key``
- attribute in the module's diff or state output. The ``sanitize_cr`` function
- expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403).
- - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- minor_changes:
- - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413).
- release_summary: Bugfix release for inclusion in Ansible 10.0.0rc1.
- fragments:
- - 8403-fix-typeerror-in-keycloak-client.yaml
- - 8406-fix-homebrew-cask-warning.yaml
- - 8411-locale-gen-vardict.yml
- - 8413-galaxy-refactor.yml
- - 9.0.1.yml
- release_date: '2024-05-27'
- 9.1.0:
- changes:
- bugfixes:
- - git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436,
- https://github.com/ansible-collections/community.general/pull/8452).
- - keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers``
- (https://github.com/ansible-collections/community.general/pull/8496).
- - launchd - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406).
- - opennebula inventory plugin - fix invalid reference to IP when inventory
- runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489).
- - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri``
- module (https://github.com/ansible-collections/community.general/pull/8430).
- - opentelemetry callback - do not save the content response when using the
- ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430).
- - pacman - do not fail if an empty list of packages has been provided and there
- is nothing to do (https://github.com/ansible-collections/community.general/pull/8514).
- deprecated_features:
- - CmdRunner module util - setting the value of the ``ignore_none`` parameter
- within a ``CmdRunner`` context is deprecated and that feature should be
- removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479).
- - git_config - the ``list_all`` option has been deprecated and will be removed
- in community.general 11.0.0. Use the ``community.general.git_config_info``
- module instead (https://github.com/ansible-collections/community.general/pull/8453).
- - git_config - using ``state=present`` without providing ``value`` is deprecated
- and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info``
- module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453).
- known_issues:
- - homectl - the module does not work under Python 3.13 or newer, since it
- relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4691,
- https://github.com/ansible-collections/community.general/pull/8497).
- - udm_user - the module does not work under Python 3.13 or newer, since it
- relies on the removed ``crypt`` standard library module (https://github.com/ansible-collections/community.general/issues/4690,
- https://github.com/ansible-collections/community.general/pull/8497).
- minor_changes:
- - CmdRunner module util - argument formats can be specified as plain functions
- without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479).
- - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431,
- https://github.com/ansible-collections/community.general/issues/8351).
- - cargo - add option ``directory``, which allows source directory to be specified
- (https://github.com/ansible-collections/community.general/pull/8480).
- - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415).
- - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()``
- (https://github.com/ansible-collections/community.general/pull/8512).
- - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints
- in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404).
- - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
- - openbsd_pkg - adds diff support to show changes in installed package list.
- This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
- - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440,
- https://github.com/ansible-collections/community.general/pull/8441).
- - proxmox_vm_info - add ``network`` option to retrieve current network information
- (https://github.com/ansible-collections/community.general/pull/8471).
- - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user
- to block a command until a service is accessible after performing the requested
- command (https://github.com/ansible-collections/community.general/issues/8051,
- https://github.com/ansible-collections/community.general/pull/8434).
- - redfish_info - add command ``CheckAvailability`` to check if a service is
- accessible (https://github.com/ansible-collections/community.general/issues/8051,
- https://github.com/ansible-collections/community.general/pull/8434).
- - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464).
- release_summary: Regular feature and bugfix release.
- fragments:
- - 8051-Redfish-Wait-For-Service.yml
- - 8402-add-diif-mode-openbsd-pkg.yml
- - 8404-ipa_dnsrecord_sshfp.yml
- - 8415-cmd-runner-stack.yml
- - 8428-assign-auth-flow-by-name-keycloak-client.yaml
- - 8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml
- - 8431-galaxy-upgrade.yml
- - 8440-allow-api-port-specification.yaml
- - 8452-git_config-absent.yml
- - 8453-git_config-deprecate-read.yml
- - 8464-redis-add-cluster-info.yml
- - 8471-proxmox-vm-info-network.yml
- - 8476-launchd-check-mode-changed.yaml
- - 8479-cmdrunner-improvements.yml
- - 8480-directory-feature-cargo.yml
- - 8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml
- - 8496-keycloak_clientscope-add-normalizations.yaml
- - 8497-crypt.yml
- - 8512-as-bool-not.yml
- - 8514-pacman-empty.yml
- - 9.1.0.yml
- modules:
- - description: Add, modify, and delete checks within a consul cluster.
- name: consul_agent_check
- namespace: ''
- - description: Add, modify and delete services within a consul cluster.
- name: consul_agent_service
- namespace: ''
- - description: Wrapper for C(django-admin check).
- name: django_check
- namespace: ''
- - description: Wrapper for C(django-admin createcachetable).
- name: django_createcachetable
- namespace: ''
- plugins:
- filter:
- - description: Keep specific keys from dictionaries in a list.
- name: keep_keys
- namespace: null
- - description: Remove specific keys from dictionaries in a list.
- name: remove_keys
- namespace: null
- - description: Replace specific keys in a list of dictionaries.
- name: replace_keys
- namespace: null
- release_date: '2024-06-17'
- 9.2.0:
- changes:
- bugfixes:
- - bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549,
- https://github.com/ansible-collections/community.general/pull/8557).
- - keycloak_clientscope - remove IDs from clientscope and its protocol mappers
- on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545).
- - nsupdate - fix 'index out of range' error when changing NS records by falling
- back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612,
- https://github.com/ansible-collections/community.general/pull/8614).
- - proxmox - fix idempotency on creation of mount volumes using Proxmox' special
- ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407,
- https://github.com/ansible-collections/community.general/pull/8542).
- - redfish_utils module utils - do not fail when language is not exactly "en"
- (https://github.com/ansible-collections/community.general/pull/8613).
- minor_changes:
- - CmdRunner module utils - the parameter ``force_lang`` now supports the special
- value ``auto`` which will automatically try and determine the best parsable
- locale in the system (https://github.com/ansible-collections/community.general/pull/8517).
- - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability
- (https://github.com/ansible-collections/community.general/pull/8542).
- - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling
- internally (https://github.com/ansible-collections/community.general/pull/8542).
- - proxmox_template - small refactor in logic for determining whether a template
- exists or not (https://github.com/ansible-collections/community.general/pull/8516).
- - redfish_* modules - adds ``ciphers`` option for custom cipher selection
- (https://github.com/ansible-collections/community.general/pull/8533).
- - sudosu become plugin - added an option (``alt_method``) to enhance compatibility
- with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214).
- - virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing``
- to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508,
- https://github.com/ansible-collections/community.general/pull/8510).
- - wdc_redfish_command - minor change to handle upgrade file for Redfish WD
- platforms (https://github.com/ansible-collections/community.general/pull/8444).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 8214-sudosu-not-working-on-some-BSD-machines.yml
- - 8444-fix-redfish-gen2-upgrade.yaml
- - 8508-virtualbox-inventory.yml
- - 8516-proxmox-template-refactor.yml
- - 8517-cmd-runner-lang-auto.yml
- - 8533-add-ciphers-option.yml
- - 8542-fix-proxmox-volume-handling.yml
- - 8545-keycloak-clientscope-remove-id-on-compare.yml
- - 8557-fix-bug-with-bitwarden.yml
- - 8613-redfish_utils-language.yaml
- - 8614-nsupdate-index-out-of-range.yml
- - 9.2.0.yml
- plugins:
- filter:
- - description: Return input type.
- name: reveal_ansible_type
- namespace: null
- test:
- - description: Validate input type.
- name: ansible_type
- namespace: null
- release_date: '2024-07-15'
- 9.3.0:
- changes:
- bugfixes:
- - gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648).
- - homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded
- (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708).
- - keycloak_user_federation - get cleartext IDP ``clientSecret`` from full
- realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294,
- https://github.com/ansible-collections/community.general/pull/8735).
- - keycloak_user_federation - remove existing user federation mappers if they
- are not present in the federation configuration and will not be updated
- (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695).
- - proxmox - fixed an issue where the new volume handling incorrectly converted
- ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646).
- - proxmox - fixed an issue where volume strings where overwritten instead
- of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646).
- - proxmox - removed the forced conversion of non-string values to strings
- to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646).
- minor_changes:
- - cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries,
- logstash, slack, splunk, sumologic, syslog_json callback plugins - make
- sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628).
- - chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword,
- onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss
- lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626).
- - chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins
- - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627).
- - cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox
- inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625).
- - doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins
- - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623).
- - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711).
- - gitlab_project - add option ``container_expiration_policy`` to schedule
- container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674).
- - gitlab_project - add option ``model_registry_access_level`` to disable model
- registry (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - add option ``pages_access_level`` to disable project pages
- (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - add option ``repository_access_level`` to disable project
- repository (https://github.com/ansible-collections/community.general/pull/8674).
- - gitlab_project - add option ``service_desk_enabled`` to disable service
- desk (https://github.com/ansible-collections/community.general/pull/8688).
- - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677,
- https://github.com/ansible-collections/community.general/pull/8682).
- - memcached, pickle, redis, yaml cache plugins - make sure that all options
- are typed (https://github.com/ansible-collections/community.general/pull/8624).
- - opentelemetry callback plugin - fix default value for ``store_spans_in_file``
- causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566,
- https://github.com/ansible-collections/community.general/pull/8741).
- - passwordstore lookup plugin - add the current user to the lockfile file
- name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
- - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675,
- https://github.com/ansible-collections/community.general/issues/8656).
- - pkgng - add option ``use_globs`` (default ``true``) to optionally disable
- glob patterns (https://github.com/ansible-collections/community.general/issues/8632,
- https://github.com/ansible-collections/community.general/pull/8633).
- - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713).
- - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify
- path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 8623-become-types.yml
- - 8624-cache-types.yml
- - 8625-inventory-types.yml
- - 8626-lookup-types.yml
- - 8627-connection-types.yml
- - 8628-callback-types.yml
- - 8632-pkgng-add-option-use_globs.yml
- - 8646-fix-bug-in-proxmox-volumes.yml
- - 8648-fix-gitlab-runner-paused.yaml
- - 8654-add-redis-tls-params.yml
- - 8674-add-gitlab-project-cleanup-policy.yml
- - 8675-pipx-install-suffix.yml
- - 8682-locale-gen-multiple.yaml
- - 8688-gitlab_project-add-new-params.yml
- - 8689-passwordstore-lock-naming.yml
- - 8695-keycloak_user_federation-mapper-removal.yml
- - 8708-homebrew_cask-fix-upgrade-all.yml
- - 8711-gconftool2-refactor.yml
- - 8713-proxmox_lxc_interfaces.yml
- - 8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml
- - 8741-fix-opentelemetry-callback.yml
- - 9.3.0.yml
- modules:
- - description: Bootc Switch and Upgrade.
- name: bootc_manage
- namespace: ''
- - description: Services manager for Homebrew.
- name: homebrew_services
- namespace: ''
- - description: Allows obtaining Keycloak realm keys metadata via Keycloak API.
- name: keycloak_realm_keys_metadata_info
- namespace: ''
- release_date: '2024-08-12'
- 9.4.0:
- changes:
- bugfixes:
- - gitlab_group_access_token - fix crash in check mode caused by attempted
- access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
- - gitlab_project - fix ``container_expiration_policy`` not being applied when
- creating a new project (https://github.com/ansible-collections/community.general/pull/8790).
- - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy``
- attribute (https://github.com/ansible-collections/community.general/pull/8790).
- - gitlab_project_access_token - fix crash in check mode caused by attempted
- access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
- - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850,
- https://github.com/ansible-collections/community.general/pull/8823).
- - keycloak_user_federation - fix key error when removing mappers during an
- update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762).
- - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when
- an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
- - keycloak_user_federation - sort desired and after mapper list by name (analog
- to before mapper list) to minimize diff and make change detection more accurate
- (https://github.com/ansible-collections/community.general/pull/8761).
- - proxmox inventory plugin - fixed a possible error on concatenating responses
- from proxmox. In case an API call unexpectedly returned an empty result,
- the inventory failed with a fatal error. Added check for empty response
- (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794).
- deprecated_features:
- - MH decorator cause_changes module utils - deprecate parameters ``on_success``
- and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791).
- - 'pipx - support for versions of the command line tool ``pipx`` older than
- ``1.7.0`` is deprecated and will be removed in community.general 11.0.0
- (https://github.com/ansible-collections/community.general/pull/8793).
-
- '
- - 'pipx_info - support for versions of the command line tool ``pipx`` older
- than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0
- (https://github.com/ansible-collections/community.general/pull/8793).
-
- '
- minor_changes:
- - MH module utils - add parameter ``when`` to ``cause_changes`` decorator
- (https://github.com/ansible-collections/community.general/pull/8766).
- - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766).
- - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the
- list of packages fetched or excluded from the repository(https://github.com/ansible-collections/community.general/pull/8779).
- - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - csv module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - deco MH module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
- - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - gitlab_project - add option ``issues_access_level`` to enable/disable project
- issues (https://github.com/ansible-collections/community.general/pull/8760).
- - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - gitlab_project - sorted parameters in order to avoid future merge conflicts
- (https://github.com/ansible-collections/community.general/pull/8759).
- - hashids filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
- - jira - replace deprecated params when using decorator ``cause_changes``
- (https://github.com/ansible-collections/community.general/pull/8791).
- - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - keycloak_client - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_user_federation - add module argument allowing users to optout
- of the removal of unspecified mappers, for example to keep the keycloak
- default mappers (https://github.com/ansible-collections/community.general/pull/8764).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - manageiq_provider - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``,
- ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809).
- - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
- - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
- - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - redis cache plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - scaleway_compute - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_security_group - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - scaleway_security_group - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_user_data - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - vardict module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - vars MH module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- release_summary: Bugfix and feature release.
- fragments:
- - 8738-limit-packages-for-copr.yml
- - 8759-gitlab_project-sort-params.yml
- - 8760-gitlab_project-add-issues-access-level.yml
- - 8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml
- - 8762-keycloac_user_federation-fix-key-error-when-updating.yml
- - 8764-keycloak_user_federation-make-mapper-removal-optout.yml
- - 8766-mh-deco-improve.yml
- - 8776-mute-vardict-deprecation.yml
- - 8790-gitlab_project-fix-cleanup-policy-on-project-create.yml
- - 8791-mh-cause-changes-param-depr.yml
- - 8793-pipx-global.yml
- - 8794-Fixing-possible-concatination-error.yaml
- - 8796-gitlab-access-token-check-mode.yml
- - 8809-pipx-new-params.yml
- - 8814-dict-comprehension.yml
- - 8822-dict-comprehension.yml
- - 8823-keycloak-realm-key.yml
- - 8831-fix-error-when-mapper-id-is-provided.yml
- - 8833-dict-comprehension.yml
- - 9.4.0.yml
- modules:
- - description: Allows managing Keycloak User Profiles.
- name: keycloak_userprofile
- namespace: ''
- - description: Manages OpenNebula virtual networks.
- name: one_vnet
- namespace: ''
- release_date: '2024-09-09'
- 9.5.0:
- changes:
- bugfixes:
- - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679,
- https://github.com/ansible-collections/community.general/pull/8948).
- - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing
- parameter (https://github.com/ansible-collections/community.general/pull/8929).
- - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970).
- - django_command - option ``command`` is now split lexically before passed
- to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944).
- - homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691,
- https://github.com/ansible-collections/community.general/pull/8987).
- - ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states
- (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920).
- - ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408,
- https://github.com/ansible-collections/community.general/pull/8900).
- - java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - keycloak_realm - fix change detection in check mode by sorting the lists
- in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877).
- - keycloak_user_federation - add module argument allowing users to configure
- the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898).
- - keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute``
- to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785).
- - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses
- to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812).
- - keycloak_userprofile - fix empty response when fetching userprofile component
- by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923).
- - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config``
- and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940).
- - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895,
- https://github.com/ansible-collections/community.general/issues/8888).
- - one_service - fix service creation after it was deleted with ``unique``
- parameter (https://github.com/ansible-collections/community.general/issues/3137,
- https://github.com/ansible-collections/community.general/pull/8887).
- - pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - python_runner module utils - parameter ``path_prefix`` was being handled
- as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944).
- - udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690,
- https://github.com/ansible-collections/community.general/pull/8987).
- deprecated_features:
- - hipchat - the hipchat service has been discontinued and the self-hosted
- variant has been End of Life since 2020. The module is therefore deprecated
- and will be removed from community.general 11.0.0 if nobody provides compelling
- reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919).
- minor_changes:
- - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
- - flatpak - improve the parsing of Flatpak application IDs based on official
- guidelines (https://github.com/ansible-collections/community.general/pull/8909).
- - gio_mime - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855).
- - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908).
- - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
- - jira - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856).
- - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type``
- (https://github.com/ansible-collections/community.general/pull/8973).
- - keycloak_user_federation - add the user federation config parameter ``referral``
- to the module arguments (https://github.com/ansible-collections/community.general/pull/8954).
- - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752,
- https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
- - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections
- (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704,
- https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897).
- - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885).
- - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578,
- https://github.com/ansible-collections/community.general/pull/8889).
- - one_image - extend xsd scheme to make it return a lot more info about image
- (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image - refactor code to make it more similar to ``one_template`` and
- ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image_info - extend xsd scheme to make it return a lot more info about
- image (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image_info - refactor code to make it more similar to ``one_template``
- and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
- - open_iscsi - allow login to a portal with multiple targets without specifying
- any of them (https://github.com/ansible-collections/community.general/pull/8719).
- - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
- - passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
- - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917).
- - redfish_command - add handling of the ``PasswordChangeRequired`` message
- from services in the ``UpdateUserPassword`` command to directly modify the
- user's password if the requested user is the one invoking the operation
- (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653).
- - redfish_confg - remove ``CapacityBytes`` from required paramaters of the
- ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956).
- - redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume``
- command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990).
- - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937).
- - scaleway_container - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_info - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_namespace_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_registry_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_info - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_namespace_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876).
- release_summary: 'Regular bugfix and feature release.
-
-
- Please note that this is the last feature release for community.general 9.x.y.
-
- From now on, new features will only go into community.general 10.x.y.'
- fragments:
- - 8532-expand-opennuebula-inventory-data.yml
- - 8652-Redfish-Password-Change-Required.yml
- - 8679-fix-cloudflare-srv.yml
- - 8719-openiscsi-add-multiple-targets.yaml
- - 8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml
- - 8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml
- - 8855-gio_mime_vardict.yml
- - 8856-jira_vardict.yml
- - 8858-dict-comprehension.yml
- - 8876-dict-items-loop.yml
- - 8877-keycloak_realm-sort-lists-before-change-detection.yaml
- - 8885-add-force-flag-for-nmp.yml
- - 8887-fix-one_service-unique.yml
- - 8889-refactor-one-image-modules.yml
- - 8895-fix-comprehension.yaml
- - 8897-nmcli-add-reload-and-up-down.yml
- - 8898-add-arg-to-exclude-bind-credential-from-change-check.yaml
- - 8900-ipa-hostgroup-fix-states.yml
- - 8908-add-gitlab-group-params.yml
- - 8909-flatpak-improve-name-parsing.yaml
- - 8917-proxmox-clean-auth.yml
- - 8920-ipa-host-fix-state.yml
- - 8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml
- - 8925-atomic.yml
- - 8929-cmd_runner-bugfix.yml
- - 8937-add-StorageId-RedfishURI-to-disk-facts.yml
- - 8940-keycloak_userprofile-improve-diff.yml
- - 8944-django-command-fix.yml
- - 8952-password-store-lookup-create-subkey-support.yml
- - 8954-keycloak-user-federation-add-referral-parameter.yml
- - 8956-remove-capacitybytes-from-the-required-parameters_list.yml
- - 8966-dig-add-port-option.yml
- - 8970-fix-dig-multi-nameservers.yml
- - 8973-keycloak_client-add-x509-auth.yml
- - 8987-legacycrypt.yml
- - 8990.yml
- - 9.5.0.yml
- - deprecate-hipchat.yml
- modules:
- - description: Manage keytab file in FreeIPA.
- name: ipa_getkeytab
- namespace: ''
- release_date: '2024-10-07'
- 9.5.1:
- changes:
- bugfixes:
- - bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028).
- - collection_version lookup plugin - use ``importlib`` directly instead of
- the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib``
- (https://github.com/ansible-collections/community.general/pull/9084).
- - gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010).
- - keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092,
- https://github.com/ansible-collections/community.general/pull/9093).
- - "keycloak_group - fix crash caused in subgroup creation. The crash was caused\
- \ by a missing or empty ``subGroups`` property in Keycloak \u226523 (https://github.com/ansible-collections/community.general/issues/8788,\
- \ https://github.com/ansible-collections/community.general/pull/8979)."
- - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051,
- https://github.com/ansible-collections/community.general/pull/9052).
- - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199,
- https://github.com/ansible-collections/community.general/pull/8907).
- - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
- - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
- - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019).
- - redfish_utils module utils - fix issue with URI parsing to gracefully handling
- trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047,
- https://github.com/ansible-collections/community.general/pull/9057).
- minor_changes:
- - redfish_utils module utils - schedule a BIOS configuration job at next reboot
- when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
- release_summary: Regular bugfix release.
- fragments:
- - 8907-fix-one-host-id.yml
- - 8979-keycloak_group-fix-subgroups.yml
- - 9.5.1.yml
- - 9010-edit-gitlab-label-color.yaml
- - 9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml
- - 9019-onevnet-bugfix.yml
- - 9028-bitwarden-secrets-manager-syntax-fix.yml
- - 9047-redfish-uri-parsing.yml
- - 9052-modprobe-bugfix.yml
- - 9056-fix-one_image-modules.yml
- - 9084-collection_version-importlib.yml
- - 9092-keycloak-clientscope-type-fix-check-mode.yml
- release_date: '2024-11-03'
- 9.5.2:
- changes:
- bugfixes:
- - dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124,
- https://github.com/ansible-collections/community.general/issues/8830).
- - dnf_config_manager - forces locale to ``C`` before module starts. If the
- locale was set to non-English, the output of the ``dnf config-manager``
- could not be parsed (https://github.com/ansible-collections/community.general/pull/9157,
- https://github.com/ansible-collections/community.general/issues/9046).
- - flatpak - force the locale language to ``C`` when running the flatpak command
- (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883).
- - github_key - in check mode, a faulty call to ```datetime.strftime(...)```
- was being made which generated an exception (https://github.com/ansible-collections/community.general/issues/9185).
- - homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex
- (https://github.com/ansible-collections/community.general/pull/9128).
- - keycloak_client - fix diff by removing code that turns the attributes dict
- which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077).
- - keycloak_clientscope - fix diff and ``end_state`` by removing the code that
- turns the attributes dict, which contains additional config items, into
- a list (https://github.com/ansible-collections/community.general/pull/9082).
- - keycloak_clientscope_type - sort the default and optional clientscope lists
- to improve the diff (https://github.com/ansible-collections/community.general/pull/9202).
- - redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114).
- - slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198).
- minor_changes:
- - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being
- suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099).
- release_summary: Regular bugfix release.
- fragments:
- - 9.5.2.yml
- - 9077-keycloak_client-fix-attributes-dict-turned-into-list.yml
- - 9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml
- - 9099-proxmox-fix-insecure.yml
- - 9114-redfish-utils-update-remove-default-applytime.yml
- - 9124-dnf_config_manager.yml
- - 9128-homebrew_cask-name-regex-fix.yml
- - 9157-fix-dnf_config_manager-locale.yml
- - 9186-fix-broken-check-mode-in-github-key.yml
- - 9187-flatpak-lang.yml
- - 9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml
- - 9202-keycloak_clientscope_type-sort-lists.yml
- release_date: '2024-12-02'
- 9.5.3:
- changes:
- bugfixes:
- - dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363,
- https://github.com/ansible-collections/community.general/issues/9362).
- - htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485,
- https://github.com/ansible-collections/community.general/pull/9490).
- - proxmox_disk - fix async method and make ``resize_disk`` method handle errors
- correctly (https://github.com/ansible-collections/community.general/pull/9256).
- - proxmox_template - fix the wrong path called on ``proxmox_template.task_status``
- (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277).
- - qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334).
- - redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi
- system resource nodes (https://github.com/ansible-collections/community.general/pull/9234).
- minor_changes:
- - proxmox module utils - add method ``api_task_complete`` that can wait for
- task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256).
- release_summary: Regular bugfix release.
- security_fixes:
- - keycloak_authentication - API calls did not properly set the ``priority``
- during update resulting in incorrectly sorted authentication flows. This
- apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263).
- fragments:
- - 9.5.3.yml
- - 9234-fix-verify-bios-attributes-multi-system.yml
- - 9256-proxmox_disk-fix-async-method-of-resize_disk.yml
- - 9263-kc_authentication-api-priority.yaml
- - 9277-proxmox_template-fix-the-wrong-path-called-on-proxmox_template.task_status.yaml
- - 9334-qubes-conn.yml
- - 9363-dig-nonameservers.yml
- - 9490-htpasswd-permissions.yml
- release_date: '2024-12-31'
- 9.5.4:
- changes:
- bugfixes:
- - 'redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions)
-
- when unregistering a system: newer versions of subscription-manager, as
-
- available in EL 10 and Fedora 41+, do not support entitlements anymore,
- and
-
- thus unsubscribing will fail
-
- (https://github.com/ansible-collections/community.general/pull/9578).
-
- '
- security_fixes:
- - keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not
- show in the logs (https://github.com/ansible-collections/community.general/pull/9621).
- fragments:
- - 9578-redhat_subscription-no-remove-on-unregister.yml
- - 9621-keycloak_client-sanitize-saml-encryption-key.yml
- release_date: '2025-01-27'
- 9.5.5:
- changes:
- bugfixes:
- - apache2_mod_proxy - make compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/9762).
- - apache2_mod_proxy - passing the cluster's page as referer for the member's
- pages. This makes the module actually work again for halfway modern Apache
- versions. According to some comments founds on the net the referer was required
- since at least 2019 for some versions of Apache 2 (https://github.com/ansible-collections/community.general/pull/9762).
- - cloudflare_dns - fix crash when deleting a DNS record or when updating a
- record with ``solo=true`` (https://github.com/ansible-collections/community.general/issues/9652,
- https://github.com/ansible-collections/community.general/pull/9649).
- - 'elasticsearch_plugin - fix ``ERROR: D is not a recognized option`` issue
- when configuring proxy settings (https://github.com/ansible-collections/community.general/pull/9774,
- https://github.com/ansible-collections/community.general/issues/9773).'
- - keycloak_client - fix and improve existing tests. The module showed a diff
- without actual changes, solved by improving the ``normalise_cr()`` function
- (https://github.com/ansible-collections/community.general/pull/9644).
- - keycloak_client - in check mode, detect whether the lists in before client
- (for example redirect URI list) contain items that the lists in the desired
- client do not contain (https://github.com/ansible-collections/community.general/pull/9739).
- - passwordstore lookup plugin - fix subkey creation even when ``create=false``
- (https://github.com/ansible-collections/community.general/issues/9105, https://github.com/ansible-collections/community.general/pull/9106).
- - 'proxmox inventory plugin - plugin did not update cache correctly after
- ``meta: refresh_inventory`` (https://github.com/ansible-collections/community.general/issues/9710,
- https://github.com/ansible-collections/community.general/pull/9760).'
- - 'redhat_subscription - use the "enable_content" option (when available)
- when
-
- registering using D-Bus, to ensure that subscription-manager enables the
-
- content on registration; this is particular important on EL 10+ and Fedora
-
- 41+
-
- (https://github.com/ansible-collections/community.general/pull/9778).
-
- '
- - xml - ensure file descriptor is closed (https://github.com/ansible-collections/community.general/pull/9695).
- release_summary: Regular bugfix release.
- fragments:
- - 9.5.5.yml
- - 9106-passwordstore-fix-subkey-creation-even-when-create-==-false.yml
- - 9644-kc_client-test-improvement-and-fix.yaml
- - 9649-cloudflare_dns-fix-crash-when-deleting-record.yml
- - 9695-xml-close-file.yml
- - 9739-keycloak_client-compare-before-desired-directly.yml
- - 9760-proxmox-inventory.yml
- - 9762-apache2_mod_proxy.yml
- - 9774-fix-elasticsearch_plugin-proxy-settings.yml
- - 9778-redhat_subscription-ensure-to-enable-content.yml
- release_date: '2025-02-24'
- 9.5.6:
- changes:
- bugfixes:
- - cloudlare_dns - handle exhausted response stream in case of HTTP errors
- to show nice error message to the user (https://github.com/ansible-collections/community.general/issues/9782,
- https://github.com/ansible-collections/community.general/pull/9818).
- - dnf_versionlock - add support for dnf5 (https://github.com/ansible-collections/community.general/issues/9556).
- - homebrew_cask - handle unusual brew version strings (https://github.com/ansible-collections/community.general/issues/8432,
- https://github.com/ansible-collections/community.general/pull/9881).
- - ipa_host - module revoked existing host certificates even if ``user_certificate``
- was not given (https://github.com/ansible-collections/community.general/pull/9694).
- - nmcli - enable changing only the order of DNS servers or search suffixes
- (https://github.com/ansible-collections/community.general/issues/8724, https://github.com/ansible-collections/community.general/pull/9880).
- - proxmox_vm_info - the module no longer expects that the key ``template``
- exists in a dictionary returned by Proxmox (https://github.com/ansible-collections/community.general/issues/9875,
- https://github.com/ansible-collections/community.general/pull/9910).
- - sudoers - display stdout and stderr raised while failed validation (https://github.com/ansible-collections/community.general/issues/9674,
- https://github.com/ansible-collections/community.general/pull/9871).
- minor_changes:
- - consul_token - fix idempotency when ``policies`` or ``roles`` are supplied
- by name (https://github.com/ansible-collections/community.general/issues/9841,
- https://github.com/ansible-collections/community.general/pull/9845).
- release_summary: Regular bugfix release.
- fragments:
- - 9.5.6.yml
- - 9694-ipa-host-certificate-revoked.yml
- - 9818-cloudflare-dns-exhausted-response.yml
- - 9845-consul_token_idempotency.yml
- - 9875-proxmox-dont-expect-key-template-to-exist.yml
- - 9880-nmcli-fix-reorder-same-dns-nameservers-search-suffixes.yml
- - dnf_versionlock.yml
- - homebrew_cask.yml
- - sudoers.yml
- release_date: '2025-03-24'
- 9.5.7:
- changes:
- bugfixes:
- - dependent look plugin - make compatible with ansible-core's Data Tagging
- feature (https://github.com/ansible-collections/community.general/pull/9833).
- - diy callback plugin - make compatible with ansible-core's Data Tagging feature
- (https://github.com/ansible-collections/community.general/pull/9833).
- - "github_deploy_key - check that key really exists on 422\_to avoid masking\
- \ other errors (https://github.com/ansible-collections/community.general/issues/6718,\
- \ https://github.com/ansible-collections/community.general/pull/10011)."
- - hashids and unicode_normalize filter plugins - avoid deprecated ``AnsibleFilterTypeError``
- on ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9992).
- - keycloak_authentication - fix authentification config duplication for Keycloak
- < 26.2.0 (https://github.com/ansible-collections/community.general/pull/9987).
- - keycloak_client - fix the idempotency regression by normalizing the Keycloak
- response for ``after_client`` (https://github.com/ansible-collections/community.general/issues/9905,
- https://github.com/ansible-collections/community.general/pull/9976).
- - proxmox inventory plugin - fix ``ansible_host`` staying empty for certain
- Proxmox nodes (https://github.com/ansible-collections/community.general/issues/5906,
- https://github.com/ansible-collections/community.general/pull/9952).
- - proxmox_disk - fail gracefully if ``storage`` is required but not provided
- by the user (https://github.com/ansible-collections/community.general/issues/9941,
- https://github.com/ansible-collections/community.general/pull/9963).
- - reveal_ansible_type filter plugin and ansible_type test plugin - make compatible
- with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
- - sysrc - no longer always reporting ``changed=true`` when ``state=absent``.
- This fixes the method ``exists()`` (https://github.com/ansible-collections/community.general/issues/10004,
- https://github.com/ansible-collections/community.general/pull/10005).
- - yaml callback plugin - use ansible-core internals to avoid breakage with
- Data Tagging (https://github.com/ansible-collections/community.general/pull/9833).
- known_issues:
- - reveal_ansible_type filter plugin and ansible_type test plugin - note that
- ansible-core's Data Tagging feature implements new aliases, such as ``_AnsibleTaggedStr``
- for ``str``, ``_AnsibleTaggedInt`` for ``int``, and ``_AnsibleTaggedFloat``
- for ``float`` (https://github.com/ansible-collections/community.general/pull/9833).
- minor_changes:
- - apache2_module - added workaround for new PHP module name, from ``php7_module``
- to ``php_module`` (https://github.com/ansible-collections/community.general/pull/9951).
- release_summary: Regular bugfix release.
- fragments:
- - 10005-fix-method-exists-in-sysrc.yml
- - 10011-github_deploy_key-check-key-present.yml
- - 9.5.7.yml
- - 9833-data-tagging.yml
- - 9951-mod-php-identifier.yml
- - 9952-proxmox-inventory-plugin-improve-ansible_host.yml
- - 9963-proxmox_disk-storage.yml
- - 9976-keycloak_client-fix-idempotency-regression.yml
- - 9987-keycloak-auth-flow-fix-config.yaml
- - 9992-filtertypeerror.yml
- release_date: '2025-04-21'
- 9.5.8:
- changes:
- bugfixes:
- - cobbler_system - fix bug with Cobbler >= 3.4.0 caused by giving more than
- 2 positional arguments to ``CobblerXMLRPCInterface.get_system_handle()``
- (https://github.com/ansible-collections/community.general/issues/8506, https://github.com/ansible-collections/community.general/pull/10145).
- - kdeconfig - allow option values beginning with a dash (https://github.com/ansible-collections/community.general/issues/10127,
- https://github.com/ansible-collections/community.general/pull/10128).
- - keycloak_user_rolemapping - fix ``--diff`` mode (https://github.com/ansible-collections/community.general/issues/10067,
- https://github.com/ansible-collections/community.general/pull/10075).
- - pickle cache plugin - avoid extra JSON serialization with ansible-core >=
- 2.19 (https://github.com/ansible-collections/community.general/pull/10136).
- - rundeck_acl_policy - ensure that project ACLs are sent to the correct endpoint
- (https://github.com/ansible-collections/community.general/pull/10097).
- - sysrc - split the output of ``sysrc -e -a`` on the first ``=`` only (https://github.com/ansible-collections/community.general/issues/10120,
- https://github.com/ansible-collections/community.general/pull/10121).
- release_summary: Regular bugfix release.
- fragments:
- - 10075-keycloak_user_rolemapping-diff.yml
- - 10097-fix-rundeck_acl_policy-project-endpoint.yml
- - 10121-sysrc-fix-split-first-separator.yml
- - 10128-mark-end-of-options.yml
- - 10136-cache-pickle-json.yml
- - 10145-fix-typeerror-cobbler-xmlrpc.yml
- - 9.5.8.yml
- release_date: '2025-05-19'
- 9.5.9:
- changes:
- bugfixes:
- - yaml callback plugin - adjust to latest changes in ansible-core devel (https://github.com/ansible-collections/community.general/pull/10212).
- - yaml callback plugin - when using ansible-core 2.19.0b2 or newer, uses a
- new utility provided by ansible-core. This allows us to remove all hacks
- and vendored code that was part of the plugin for ansible-core versions
- with Data Tagging so far (https://github.com/ansible-collections/community.general/pull/10242).
- release_summary: Bugfix release.
- fragments:
- - 10212-yaml.yml
- - 10242-yaml.yml
- - 9.5.9.yml
- release_date: '2025-06-15'
+ancestor: 11.0.0
+releases: {}
diff --git a/changelogs/config.yaml b/changelogs/config.yaml
index 32ffe27f2b..578b8c3765 100644
--- a/changelogs/config.yaml
+++ b/changelogs/config.yaml
@@ -7,9 +7,9 @@ changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
+ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
-flatmap: true
new_plugins_after_name: removed_features
notesdir: fragments
output_formats:
@@ -40,3 +40,4 @@ use_fqcn: true
add_plugin_period: true
changelog_nice_yaml: true
changelog_sort: version
+vcs: auto
diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
new file mode 100644
index 0000000000..d1cfee7816
--- /dev/null
+++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
@@ -0,0 +1,7 @@
+deprecated_features:
+ - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227).
+
+minor_changes:
+ - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227).
diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
new file mode 100644
index 0000000000..eec12e8669
--- /dev/null
+++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231).
diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
new file mode 100644
index 0000000000..29d71ca393
--- /dev/null
+++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267).
diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
new file mode 100644
index 0000000000..9f91040d63
--- /dev/null
+++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269).
diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml
new file mode 100644
index 0000000000..d28e2ac833
--- /dev/null
+++ b/changelogs/fragments/10271--disable_lookups.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
+ - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml
new file mode 100644
index 0000000000..6fff590fee
--- /dev/null
+++ b/changelogs/fragments/10285-fstr-plugins.yml
@@ -0,0 +1,7 @@
+minor_changes:
+ - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml
new file mode 100644
index 0000000000..9d71bd17d8
--- /dev/null
+++ b/changelogs/fragments/10311-xfconf-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311).
diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml
new file mode 100644
index 0000000000..53436ea7d6
--- /dev/null
+++ b/changelogs/fragments/10323-nmcli-improvements.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323).
diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml
new file mode 100644
index 0000000000..f8f74a336c
--- /dev/null
+++ b/changelogs/fragments/10328-redundant-brackets.yml
@@ -0,0 +1,32 @@
+minor_changes:
+ - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml
new file mode 100644
index 0000000000..5e5209edda
--- /dev/null
+++ b/changelogs/fragments/10329-catapult-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329).
diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml
new file mode 100644
index 0000000000..00cd71f559
--- /dev/null
+++ b/changelogs/fragments/10339-github_app_access_token.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339).
\ No newline at end of file
diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml
new file mode 100644
index 0000000000..b35da354d2
--- /dev/null
+++ b/changelogs/fragments/10349-incus_connection-error-handling.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349).
diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml
new file mode 100644
index 0000000000..e48a6142e8
--- /dev/null
+++ b/changelogs/fragments/10359-dependent.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)."
diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml
new file mode 100644
index 0000000000..b1b5db632b
--- /dev/null
+++ b/changelogs/fragments/10417-sysrc-refactor.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417).
+bugfixes:
+ - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417).
\ No newline at end of file
diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml
new file mode 100644
index 0000000000..24d68b52df
--- /dev/null
+++ b/changelogs/fragments/10442-apk-fix-empty-names.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - apk - handle empty name strings properly
+ (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442).
\ No newline at end of file
diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
new file mode 100644
index 0000000000..1bf39619cc
--- /dev/null
+++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)."
diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
new file mode 100644
index 0000000000..40337a424b
--- /dev/null
+++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - capabilities - using invalid path (symlink/directory/...) returned unrelated and incoherent error messages (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455).
\ No newline at end of file
diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
new file mode 100644
index 0000000000..70af0932b3
--- /dev/null
+++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)."
\ No newline at end of file
diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml
new file mode 100644
index 0000000000..4b3f317454
--- /dev/null
+++ b/changelogs/fragments/10459-deprecations.yml
@@ -0,0 +1,6 @@
+bugfixes:
+ - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
new file mode 100644
index 0000000000..c4b77299f5
--- /dev/null
+++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)."
diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml
new file mode 100644
index 0000000000..74867e71a7
--- /dev/null
+++ b/changelogs/fragments/10491-irc.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)."
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml
new file mode 100644
index 0000000000..09a0c442b0
--- /dev/null
+++ b/changelogs/fragments/10494-rfdn-1.yml
@@ -0,0 +1,27 @@
+minor_changes:
+ - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml
new file mode 100644
index 0000000000..89aeab9356
--- /dev/null
+++ b/changelogs/fragments/10505-rfdn-2.yml
@@ -0,0 +1,39 @@
+minor_changes:
+ - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml
new file mode 100644
index 0000000000..fae9d118bc
--- /dev/null
+++ b/changelogs/fragments/10507-rfdn-3.yml
@@ -0,0 +1,35 @@
+minor_changes:
+ - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml
new file mode 100644
index 0000000000..6d8f9e7d77
--- /dev/null
+++ b/changelogs/fragments/10512-rfdn-4.yml
@@ -0,0 +1,42 @@
+minor_changes:
+ - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml
new file mode 100644
index 0000000000..d930d7345c
--- /dev/null
+++ b/changelogs/fragments/10513-rfdn-5.yml
@@ -0,0 +1,18 @@
+minor_changes:
+ - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml
new file mode 100644
index 0000000000..08257d6c78
--- /dev/null
+++ b/changelogs/fragments/10531-wsl-paramiko.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead
+ (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)."
diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml
new file mode 100644
index 0000000000..84c5d985e8
--- /dev/null
+++ b/changelogs/fragments/10532-apk.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)."
diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml
new file mode 100644
index 0000000000..7e84b7ecb0
--- /dev/null
+++ b/changelogs/fragments/10539-json_query.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)."
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml
new file mode 100644
index 0000000000..c0de6dd845
--- /dev/null
+++ b/changelogs/fragments/10566-merge_variables.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)."
diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml
new file mode 100644
index 0000000000..8323bbe959
--- /dev/null
+++ b/changelogs/fragments/9499-typetalk-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499).
diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml
new file mode 100644
index 0000000000..201d85f71c
--- /dev/null
+++ b/changelogs/fragments/become-pipelining.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)."
+ - "machinectl become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)."
diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml
new file mode 100644
index 0000000000..1c7ec89b7d
--- /dev/null
+++ b/changelogs/fragments/logstash.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345).
diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml
index f73d0fe012..4594ab4c2d 100644
--- a/docs/docsite/extra-docs.yml
+++ b/docs/docsite/extra-docs.yml
@@ -8,9 +8,10 @@ sections:
toctree:
- filter_guide
- test_guide
- - title: Cloud Guides
+ - title: Technology Guides
toctree:
- guide_alicloud
+ - guide_iocage
- guide_online
- guide_packet
- guide_scaleway
@@ -20,3 +21,4 @@ sections:
- guide_vardict
- guide_cmdrunner
- guide_modulehelper
+ - guide_uthelper
diff --git a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
index 77281549ba..64ac1ff0c2 100644
--- a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
+++ b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
@@ -36,7 +36,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
index 62b25c344c..6c201d5b4e 100644
--- a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
+++ b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
@@ -36,7 +36,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
index fb0af32f2f..0c0ba8f0be 100644
--- a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
+++ b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
@@ -37,7 +37,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-3 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
index 488cb2ce7d..3549d29ba7 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
@@ -44,7 +44,7 @@ gives
- {k0_x0: A0, k1_x1: B0}
- {k0_x0: A1, k1_x1: B1}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
index 03d4710f3a..4ac87ab79c 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
@@ -46,7 +46,7 @@ gives
- k2_x2: [C1]
k3_x3: bar
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
index ba1bcad502..d0eb202bfe 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
@@ -53,7 +53,7 @@ gives
k2_x2: [C1]
k3_x3: bar
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-3 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
index 42737c44b7..64a82536d8 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
@@ -4,7 +4,7 @@
SPDX-License-Identifier: GPL-3.0-or-later
.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts:
-
+
Lists of dictionaries
^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst
index 1c6468ddec..da8a90af3c 100644
--- a/docs/docsite/rst/filter_guide.rst
+++ b/docs/docsite/rst/filter_guide.rst
@@ -8,7 +8,7 @@
community.general Filter Guide
==============================
-The :ref:`community.general collection ` offers several useful filter plugins.
+The :anscollection:`community.general collection ` offers several useful filter plugins.
.. toctree::
:maxdepth: 2
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
index 3059b00321..e5b5bb7e36 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
@@ -26,8 +26,8 @@ You can use the :ansplugin:`community.general.dict_kv filter `_
+* `man iocage `_
+* `Jails and Containers `_
+
+.. note::
+ The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`.
+
+.. toctree::
+ :caption: Table of Contents
+ :maxdepth: 1
+
+ guide_iocage_inventory_basics
+ guide_iocage_inventory_dhcp
+ guide_iocage_inventory_hooks
+ guide_iocage_inventory_properties
+ guide_iocage_inventory_tags
+ guide_iocage_inventory_aliases
diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
new file mode 100644
index 0000000000..431403d733
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
@@ -0,0 +1,200 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases:
+
+Aliases
+-------
+
+Quoting :ref:`inventory_aliases`:
+
+ The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host.
+
+As root at the iocage host, stop and destroy all jails:
+
+.. code-block:: console
+
+ shell> iocage stop ALL
+ * Stopping srv_1
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1000 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_2
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1001 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_3
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1002 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ ansible_client is not running!
+
+ shell> iocage destroy -f srv_1 srv_2 srv_3
+ Destroying srv_1
+ Destroying srv_2
+ Destroying srv_3
+
+Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``:
+
+.. code-block:: console
+
+ shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1
+ 1c11de2d successfully created!
+ 9d94cc9e successfully created!
+ 052b9557 successfully created!
+
+The names are random. Start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting 052b9557
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.137/24
+ No default gateway found for ipv6.
+ * Starting 1c11de2d
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.146/24
+ No default gateway found for ipv6.
+ * Starting 9d94cc9e
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.115/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+==========+======+=======+======+=================+====================+=====+================+==========+
+ | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Set notes. The tag *alias* will be used to create inventory aliases:
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557
+ notes: none -> vmm=iocage_02 project=foo alias=srv_1
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d
+ notes: none -> vmm=iocage_02 project=foo alias=srv_2
+ shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e
+ notes: none -> vmm=iocage_02 project=bar alias=srv_3
+
+Update the inventory configuration. Set the option
+:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the
+value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be
+enabled. For example, ``hosts/02_iocage.yml`` contains:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ inventory_hostname_tag: alias
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ alias: srv_1
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ alias: srv_2
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ alias: srv_3
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst
new file mode 100644
index 0000000000..f198edc4f4
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst
@@ -0,0 +1,128 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics:
+
+Basics
+------
+
+As root at the iocage host, create three VNET jails with a DHCP interface from the template
+*ansible_client*:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1
+ srv_3 successfully created!
+
+See: `Configuring a VNET Jail `_.
+
+As admin at the controller, list the jails:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+====================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Optionally, create shared IP jails:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24"
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24"
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24"
+ srv_3 successfully created!
+ shell> iocage list -l
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+===================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+
+See: `Configuring a Shared IP Jail `_
+
+If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
new file mode 100644
index 0000000000..3c37366ca6
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
@@ -0,0 +1,175 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp:
+
+DHCP
+----
+
+As root at the iocage host, start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting srv_1
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.183/24
+ No default gateway found for ipv6.
+ * Starting srv_2
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.204/24
+ No default gateway found for ipv6.
+ * Starting srv_3
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.169/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+As admin at the controller, list the jails. The IP4 column says "... address requires root":
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+
+Use sudo if enabled:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``. Use the option
+:ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ sudo: true
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.183
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.183
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.204
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.204
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.169
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.169
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, enable also :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+ sudo: true
+ sudo_preserve_env: true
+
+In this case, make sure the sudo tag ``SETENV`` is used:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin
+ admin ALL=(ALL) NOPASSWD:SETENV: ALL
diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
new file mode 100644
index 0000000000..45364fc798
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
@@ -0,0 +1,187 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks:
+
+Hooks
+-----
+
+The iocage utility internally opens a console to a jail to get the jail's DHCP address. This
+requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the
+message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use
+``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file
+``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks``
+
+.. code-block:: shell
+
+ case "$reason" in
+ "BOUND"|"REBIND"|"REBOOT"|"RENEW")
+ echo $new_ip_address > /var/db/dhclient-hook.address.$interface
+ ;;
+ esac
+
+where ``/zroot/iocage`` is the activated pool.
+
+.. code-block:: console
+
+ shell> zfs list | grep /zroot/iocage
+ zroot/iocage 4.69G 446G 5.08M /zroot/iocage
+ zroot/iocage/download 927M 446G 384K /zroot/iocage/download
+ zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE
+ zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE
+ zroot/iocage/images 384K 446G 384K /zroot/iocage/images
+ zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails
+ zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1
+ zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root
+ zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2
+ zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root
+ zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3
+ zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root
+ zroot/iocage/log 688K 446G 688K /zroot/iocage/log
+ zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases
+ zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE
+ zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root
+ zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates
+ zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client
+ zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root
+
+See: `man dhclient-script <https://man.freebsd.org/cgi/man.cgi?query=dhclient-script>`_
+
+Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: console
+
+ shell> cat hosts/02_iocage.yml
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+
+.. note::
+
+ The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you
+ activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path
+ ``/iocage/iocage/jails/<name>/root``. If you mount the poolname to a different path, the easiest
+ remedy is to create a symlink.
+
+As admin at the controller, display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.183
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.204
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.169
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Compose the variable ``ansible_host``. For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Test the jails. Create a playbook ``pb-test-uname.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - command: uname -a
+ register: out
+
+ - debug:
+ var: out.stdout
+
+See: :ref:`working_with_bsd`
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [command] ******************************************************************************************************
+ changed: [srv_3]
+ changed: [srv_1]
+ changed: [srv_2]
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_3] =>
+ out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_2] =>
+ out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+
+Note: This playbook and the inventory configuration works also for the *Shared IP Jails*.
diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst
new file mode 100644
index 0000000000..d044f2e7f2
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst
@@ -0,0 +1,201 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties:
+
+Properties
+----------
+
+Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. Enable
+:ansopt:`community.general.iocage#inventory:get_properties`:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Display the properties. Create the playbook ``pb-test-properties.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_properties
+
+Run the playbook. Limit the inventory to *srv_3*:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_3] =>
+ iocage_properties:
+ CONFIG_VERSION: '33'
+ allow_chflags: '0'
+ allow_mlock: '0'
+ allow_mount: '1'
+ allow_mount_devfs: '0'
+ allow_mount_fdescfs: '0'
+ allow_mount_fusefs: '0'
+ allow_mount_linprocfs: '0'
+ allow_mount_linsysfs: '0'
+ allow_mount_nullfs: '0'
+ allow_mount_procfs: '0'
+ allow_mount_tmpfs: '0'
+ allow_mount_zfs: '0'
+ allow_nfsd: '0'
+ allow_quotas: '0'
+ allow_raw_sockets: '0'
+ allow_set_hostname: '1'
+ allow_socket_af: '0'
+ allow_sysvipc: '0'
+ allow_tun: '0'
+ allow_vmm: '0'
+ assign_localhost: '0'
+ available: readonly
+ basejail: '0'
+ boot: '0'
+ bpf: '1'
+ children_max: '0'
+ cloned_release: 14.2-RELEASE
+ comment: none
+ compression: 'on'
+ compressratio: readonly
+ coredumpsize: 'off'
+ count: '1'
+ cpuset: 'off'
+ cputime: 'off'
+ datasize: 'off'
+ dedup: 'off'
+ defaultrouter: auto
+ defaultrouter6: auto
+ depends: none
+ devfs_ruleset: '4'
+ dhcp: '1'
+ enforce_statfs: '2'
+ exec_clean: '1'
+ exec_created: /usr/bin/true
+ exec_fib: '0'
+ exec_jail_user: root
+ exec_poststart: /usr/bin/true
+ exec_poststop: /usr/bin/true
+ exec_prestart: /usr/bin/true
+ exec_prestop: /usr/bin/true
+ exec_start: /bin/sh /etc/rc
+ exec_stop: /bin/sh /etc/rc.shutdown
+ exec_system_jail_user: '0'
+ exec_system_user: root
+ exec_timeout: '60'
+ host_domainname: none
+ host_hostname: srv-3
+ host_hostuuid: srv_3
+ host_time: '1'
+ hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403
+ hostid_strict_check: '0'
+ interfaces: vnet0:bridge0
+ ip4: new
+ ip4_addr: none
+ ip4_saddrsel: '1'
+ ip6: new
+ ip6_addr: none
+ ip6_saddrsel: '1'
+ ip_hostname: '0'
+ jail_zfs: '0'
+ jail_zfs_dataset: iocage/jails/srv_3/data
+ jail_zfs_mountpoint: none
+ last_started: '2025-06-11 04:29:23'
+ localhost_ip: none
+ login_flags: -f root
+ mac_prefix: 02a098
+ maxproc: 'off'
+ memorylocked: 'off'
+ memoryuse: 'off'
+ min_dyn_devfs_ruleset: '1000'
+ mount_devfs: '1'
+ mount_fdescfs: '1'
+ mount_linprocfs: '0'
+ mount_procfs: '0'
+ mountpoint: readonly
+ msgqqueued: 'off'
+ msgqsize: 'off'
+ nat: '0'
+ nat_backend: ipfw
+ nat_forwards: none
+ nat_interface: none
+ nat_prefix: '172.16'
+ nmsgq: 'off'
+ notes: none
+ nsem: 'off'
+ nsemop: 'off'
+ nshm: 'off'
+ nthr: 'off'
+ openfiles: 'off'
+ origin: readonly
+ owner: root
+ pcpu: 'off'
+ plugin_name: none
+ plugin_repository: none
+ priority: '99'
+ pseudoterminals: 'off'
+ quota: none
+ readbps: 'off'
+ readiops: 'off'
+ release: 14.2-RELEASE-p3
+ reservation: none
+ resolver: /etc/resolv.conf
+ rlimits: 'off'
+ rtsold: '0'
+ securelevel: '2'
+ shmsize: 'off'
+ source_template: ansible_client
+ stacksize: 'off'
+ state: up
+ stop_timeout: '30'
+ swapuse: 'off'
+ sync_state: none
+ sync_target: none
+ sync_tgt_zpool: none
+ sysvmsg: new
+ sysvsem: new
+ sysvshm: new
+ template: '0'
+ type: jail
+ used: readonly
+ vmemoryuse: 'off'
+ vnet: '1'
+ vnet0_mac: 02a0983da05d 02a0983da05e
+ vnet0_mtu: auto
+ vnet1_mac: none
+ vnet1_mtu: auto
+ vnet2_mac: none
+ vnet2_mtu: auto
+ vnet3_mac: none
+ vnet3_mtu: auto
+ vnet_default_interface: auto
+ vnet_default_mtu: '1500'
+ vnet_interfaces: none
+ wallclock: 'off'
+ writebps: 'off'
+ writeiops: 'off'
+
+ PLAY RECAP **********************************************************************************************************
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst
new file mode 100644
index 0000000000..afb645198c
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst
@@ -0,0 +1,117 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags:
+
+Tags
+----
+
+Quoting `man iocage <https://man.freebsd.org/cgi/man.cgi?query=iocage>`_
+
+.. code-block:: text
+
+ PROPERTIES
+ ...
+ notes="any string"
+ Custom notes for miscellaneous tagging.
+ Default: none
+ Source: local
+
+We will use the format ``notes="tag1=value1 tag2=value2 ..."``.
+
+.. note::
+
+ The iocage tags have nothing to do with the :ref:`tags`.
+
+As root at the iocage host, set notes. For example,
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_1
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_2
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=bar" srv_3
+ notes: none -> vmm=iocage_02 project=bar
+
+Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option
+:ansopt:`community.general.iocage#inventory:get_properties` must be enabled.
+For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml``:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst
index 68b46e6c94..12657f4479 100644
--- a/docs/docsite/rst/guide_modulehelper.rst
+++ b/docs/docsite/rst/guide_modulehelper.rst
@@ -38,7 +38,6 @@ But bear in mind that it does not showcase all of MH's features:
),
supports_check_mode=True,
)
- use_old_vardict = False
def __run__(self):
self.vars.original_message = ''
@@ -76,13 +75,14 @@ section above, but there are more elements that will take part in it.
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
class MyTest(ModuleHelper):
+ # behavior for module parameters ONLY, see below for further information
output_params = ()
change_params = ()
diff_params = ()
- facts_name = None
facts_params = ()
- use_old_vardict = True
- mute_vardict_deprecation = False
+
+ facts_name = None # used if generating facts, from parameters or otherwise
+
module = dict(
argument_spec=dict(...),
# ...
@@ -202,27 +202,14 @@ By using ``self.vars``, you get a central mechanism to access the parameters but
As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them.
One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
-.. important::
+.. note::
- The ``VarDict`` feature described was introduced in community.general 7.1.0, but there was a first
- implementation of it embedded within ``ModuleHelper``.
- That older implementation is now deprecated and will be removed in community.general 11.0.0.
- After community.general 7.1.0, MH modules generate a deprecation message about *using the old VarDict*.
- There are two ways to prevent that from happening:
+ The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself.
+ However, it has been factored out to become a utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`,
+ and the older implementation was removed in community.general 11.0.0.
- #. Set ``mute_vardict_deprecation = True`` and the deprecation will be silenced. If the module still uses the old ``VarDict``,
- it will not be able to update to community.general 11.0.0 (Spring 2026) upon its release.
- #. Set ``use_old_vardict = False`` to make the MH module use the new ``VarDict`` immediatelly.
- The new ``VarDict`` and its use is documented and this is the recommended way to handle this.
-
- .. code-block:: python
-
- class MyTest(ModuleHelper):
- use_old_vardict = False
- mute_vardict_deprecation = True
- ...
-
- These two settings are mutually exclusive, but that is not enforced and the behavior when setting both is not specified.
+ Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new
+ implementation but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code.
Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
@@ -233,6 +220,11 @@ If you want to include some module parameters in the output, list them in the ``
output_params = ('state', 'name')
...
+.. important::
+
+ The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
@@ -243,6 +235,11 @@ Again, to enable this feature for module parameters, you must list them in the `
change_params = ('value', )
...
+.. important::
+
+ The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
.. seealso::
See more about this in
@@ -260,6 +257,11 @@ With that, MH will automatically generate the diff output for variables that hav
# example from community.general.gio_mime
self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+.. important::
+
+ The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters.
Additionally, you must specify ``facts_name``, as in:
@@ -283,6 +285,11 @@ That generates an Ansible fact like:
debug:
msg: Volume fact is {{ ansible_facts.volume_facts.volume }}
+.. important::
+
+ The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
.. important::
If ``facts_name`` is not set, the module does not generate any facts.
@@ -346,6 +353,8 @@ However, you can set output variables specifically for that exception, if you so
.. code-block:: python
+ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException
+
def __init_module__(self):
if not complex_validation():
self.do_raise("Validation failed!")
@@ -354,11 +363,16 @@ However, you can set output variables specifically for that exception, if you so
awesomeness = calculate_awesomeness()
if awesomeness > 1000:
self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness})
+ # which is just a convenience shortcut for
+ raise ModuleHelperException("...", update_output={...})
All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call.
However, if you do want to call ``self.module.fail_json()`` yourself it will work,
just keep in mind that there will be no automatic handling of output variables in that case.
+Behind the curtains, all ``do_raise()`` does is to raise a ``ModuleHelperException``.
+If you want to create specialized error handling for your code, the best way is to extend that class and raise it when needed.
+
.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh:
StateModuleHelper
@@ -377,7 +391,6 @@ By using ``StateModuleHelper`` you can make your code like the excerpt from the
module = dict(
...
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
@@ -461,6 +474,11 @@ Additionally, MH will also delegate:
- ``diff_mode`` to ``self.module._diff``
- ``verbosity`` to ``self.module._verbosity``
+Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``.
+If any existing module already has a ``debug`` attribute defined, a warning message will be generated,
+requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be
+preemptive and will override any existing method or property in the subclasses.
+
Decorators
""""""""""
diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst
index 9de5e3f614..95b38dddd0 100644
--- a/docs/docsite/rst/guide_packet.rst
+++ b/docs/docsite/rst/guide_packet.rst
@@ -67,16 +67,16 @@ The following code block is a simple playbook that creates one `Type 0
- hostnames: myserver
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__.
@@ -110,10 +110,10 @@ If your playbook acts on existing Packet devices, you can only pass the ``hostna
hosts: localhost
tasks:
- - community.general.packet_device:
- project_id:
- hostnames: myserver
- state: rebooted
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ state: rebooted
You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field:
@@ -125,10 +125,10 @@ You can also identify specific Packet devices with the ``device_ids`` parameter.
hosts: localhost
tasks:
- - community.general.packet_device:
- project_id:
- device_ids:
- state: absent
+ - community.general.packet_device:
+ project_id:
+ device_ids:
+ state: absent
More Complex Playbooks
@@ -153,43 +153,43 @@ The following playbook will create an SSH key, 3 Packet servers, and then wait u
hosts: localhost
tasks:
- - community.general.packet_sshkey:
- key_file: ./id_rsa.pub
- label: new
+ - community.general.packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: new
- - community.general.packet_device:
- hostnames: [coreos-one, coreos-two, coreos-three]
- operating_system: coreos_beta
- plan: baremetal_0
- facility: ewr1
- project_id:
- wait_for_public_IPv: 4
- user_data: |
- #cloud-config
- coreos:
- etcd2:
- discovery: https://discovery.etcd.io/
- advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
- initial-advertise-peer-urls: http://$private_ipv4:2380
- listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
- listen-peer-urls: http://$private_ipv4:2380
- fleet:
- public-ip: $private_ipv4
- units:
- - name: etcd2.service
- command: start
- - name: fleet.service
- command: start
- register: newhosts
+ - community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_beta
+ plan: baremetal_0
+ facility: ewr1
+ project_id:
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ coreos:
+ etcd2:
+ discovery: https://discovery.etcd.io/
+ advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
+ initial-advertise-peer-urls: http://$private_ipv4:2380
+ listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+ listen-peer-urls: http://$private_ipv4:2380
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd2.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
- - name: wait for ssh
- ansible.builtin.wait_for:
- delay: 1
- host: "{{ item.public_ipv4 }}"
- port: 22
- state: started
- timeout: 500
- loop: "{{ newhosts.results[0].devices }}"
+ - name: wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ loop: "{{ newhosts.results[0].devices }}"
As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect.
diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst
new file mode 100644
index 0000000000..657ced66cf
--- /dev/null
+++ b/docs/docsite/rst/guide_uthelper.rst
@@ -0,0 +1,394 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_uthelper:
+
+UTHelper Guide
+==============
+
+Introduction
+^^^^^^^^^^^^
+
+``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules.
+It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``.
+At the time of writing (Feb 2025) that remains the only type of tests you can use
+``UTHelper`` for, but it aims to provide support for other types of interactions.
+
+Until now, there are many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples:
+
+* `test_apk.py `_ - A very simple one
+* `test_bootc_manage.py `_ -
+ This one has more test cases, but do notice how the code is repeated amongst them.
+* `test_modprobe.py `_ -
+ This one has 15 tests in it, but to achieve that it declares 8 classes repeating quite a lot of code.
+
+As you can notice, there is no consistency in the way these tests are executed -
+they all do the same thing eventually, but each one is written in a very distinct way.
+
+``UTHelper`` aims to:
+
+* provide a consistent idiom to define unit tests
+* reduce the code to a bare minimum, and
+* define tests as data instead
+* allow the test cases definition to be expressed not only as a Python data structure but also as YAML content
+
+Quickstart
+""""""""""
+
+To use UTHelper, your test module will need only a bare minimum of code:
+
+.. code-block:: python
+
+ # tests/unit/plugin/modules/test_ansible_module.py
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+
+ UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+Then, in the test specification file, you have:
+
+.. code-block:: yaml
+
+ # tests/unit/plugin/modules/test_ansible_module.yaml
+ test_cases:
+ - id: test_ansible_module
+ flags:
+ diff: true
+ input:
+ state: present
+ name: Roger the Shrubber
+ output:
+ shrubbery:
+ looks: nice
+ price: not too expensive
+ changed: true
+ diff:
+ before:
+ shrubbery: null
+ after:
+ shrubbery:
+ looks: nice
+ price: not too expensive
+ mocks:
+ run_command:
+ - command: [/testbin/shrubber, --version]
+ rc: 0
+ out: "2.80.0\n"
+ err: ''
+ - command: [/testbin/shrubber, --make-shrubbery]
+ rc: 0
+ out: 'Shrubbery created'
+ err: ''
+
+.. note::
+
+ If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+ you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+ See more details below.
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+ See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown.
+See a real world example at
+`test_gconftool2.py `_.
+
+The ``from_module()`` method will pick the filename of the test module up (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described below.
+
+If you prefer to read the test specifications from a different file path, use ``from_file()`` passing the file handle for the YAML file.
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real world example for that can be found at
+`test_snap.py `_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+ Optional. Placeholder for you to define YAML anchors that can be repeated in the test cases.
+  Its contents are never accessed directly by UTHelper.
+- ``test_cases: list``
+ Mandatory. List of test cases, see below for definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+ Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+  Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+ * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+ * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+ * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+ * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+ Optional. Parameters for the Ansible module, it can be empty.
+
+- ``output: dict``
+ Optional. Expected return values from the Ansible module.
+  All RV names used here are expected to be found in the module output, but not all RVs in the output must be here.
+ It can include special RVs such as ``changed`` and ``diff``.
+ It can be empty.
+
+- ``mocks: dict``
+ Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+ Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+ structure is dictated by the ``TestCaseMock`` subclass implementation.
+ All keys are expected to be named using snake case, as in ``run_command``.
+ The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification.
+ The structure for that specification is dependent on the implementing class.
+  See more details below for the implementation of ``RunCommandMock``.
+
+Example using YAML
+------------------
+
+We recommend you use ``UTHelper`` reading the test specifications from a YAML file.
+See an example below of what one actually looks like (excerpt from ``test_opkg.yaml``):
+
+.. code-block:: yaml
+
+ ---
+ anchors:
+ environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
+ test_cases:
+ - id: install_zlibdev
+ input:
+ name: zlib-dev
+ state: present
+ output:
+ msg: installed 1 package(s)
+ mocks:
+ run_command:
+ - command: [/testbin/opkg, --version]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, install, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ Installing zlib-dev (1.2.11-6) to root...
+ Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
+ Installing zlib (1.2.11-6) to root...
+ Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
+ Configuring zlib.
+ Configuring zlib-dev.
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ zlib-dev - 1.2.11-6
+ err: ''
+ - id: install_zlibdev_present
+ input:
+ name: zlib-dev
+ state: present
+ output:
+ msg: package(s) already present
+ mocks:
+ run_command:
+ - command: [/testbin/opkg, --version]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ zlib-dev - 1.2.11-6
+ err: ''
+
+TestCaseMocks Specifications
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``TestCaseMock`` subclass is free to define the expected data structure.
+
+RunCommandMock Specification
+""""""""""""""""""""""""""""
+
+``RunCommandMock`` mocks can be specified with the key ``run_command`` and it expects a ``list`` in which elements follow the structure:
+
+- ``command: Union[list, str]``
+ Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call.
+ It can be either a list or a string, though the list form is generally recommended.
+- ``environ: dict``
+ Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call.
+ Most commonly used are ``environ_update`` and ``check_rc``.
+ Must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test will fail.
+- ``rc: int``
+ Mandatory. The return code for the command execution.
+ As per usual in bash scripting, a value of ``0`` means success, whereas any other number is an error code.
+- ``out: str``
+ Mandatory. The *stdout* result of the command execution, as one single string containing zero or more lines.
+- ``err: str``
+ Mandatory. The *stderr* result of the command execution, as one single string containing zero or more lines.
+
+
+``UTHelper`` Reference
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. py:module:: .uthelper
+
+ .. py:class:: UTHelper
+
+ A class to encapsulate unit tests.
+
+ .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None)
+
+ Creates an ``UTHelper`` instance from a given test specification.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: module
+ :param test_module: The test module.
+ :type test_module: module
+ :param test_spec: The test specification.
+ :type test_spec: dict
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_spec()``:
+
+ .. code-block:: python
+
+ import sys
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ TEST_SPEC = dict(
+ test_cases=[
+ ...
+ ]
+ )
+
+ helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock])
+
+ .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None)
+
+ Creates an ``UTHelper`` instance from a test specification file.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: module
+ :param test_module: The test module.
+ :type test_module: module
+         :param test_spec_filehandle: A file handle providing the test specification in YAML format.
+ :type test_spec_filehandle: file
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_file()``:
+
+ .. code-block:: python
+
+ import sys
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ with open("test_spec.yaml", "r") as test_spec_filehandle:
+ helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock])
+
+ .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None)
+
+ Creates an ``UTHelper`` instance from a given Ansible module and test module.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: module
+ :param test_module_name: The name of the test module. It works if passed ``__name__``.
+ :type test_module_name: str
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_module()``:
+
+ .. code-block:: python
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ # Example usage
+ helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+ class ShrubberyMock(TestCaseMock):
+ # this name is mandatory, it is the name used in the test specification
+ name = "shrubbery"
+
+ def setup(self, mocker):
+ # perform setup, commonly using mocker to patch some other piece of code
+ ...
+
+ def check(self, test_case, results):
+            # verify the test execution met the expectations of the test case
+ # for example the function was called as many times as it should
+ ...
+
+ def fixtures(self):
+ # returns a dict mapping names to pytest fixtures that should be used for the test case
+ # for example, in RunCommandMock it creates a fixture that patches AnsibleModule.get_bin_path
+ ...
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: UTHelper injects a test function with a fixed name into the module's namespace,
+ so placing a second ``UTHelper`` instance is going to overwrite the function created by the first one.
+* Order of elements in the module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+  might make UTHelper add its function before or after the other test functions.
+  In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+  and it requires the order of the tests to be consistent.
+
+.. versionadded:: 7.5.0
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
index f65b09055b..1beef0c57f 100644
--- a/docs/docsite/rst/guide_vardict.rst
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -51,7 +51,7 @@ And by the time the module is about to exit:
That makes the return value of the module:
-.. code-block:: javascript
+.. code-block:: json
{
"abc": 123,
diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst
index 7a261c7552..a1f5723df4 100644
--- a/docs/docsite/rst/test_guide.rst
+++ b/docs/docsite/rst/test_guide.rst
@@ -8,7 +8,7 @@
community.general Test (Plugin) Guide
=====================================
-The :ref:`community.general collection ` offers currently one test plugin.
+The :anscollection:`community.general collection ` offers currently one test plugin.
.. contents:: Topics
diff --git a/galaxy.yml b/galaxy.yml
index 4578922ee4..a39ffcc7e5 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -5,7 +5,7 @@
namespace: community
name: general
-version: 9.5.9
+version: 11.2.0
readme: README.md
authors:
- Ansible (https://github.com/ansible)
diff --git a/meta/runtime.yml b/meta/runtime.yml
index 5d4ed8cb89..4efdc68688 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -3,7 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-requires_ansible: '>=2.13.0'
+requires_ansible: '>=2.16.0'
action_groups:
consul:
- consul_agent_check
@@ -15,22 +15,37 @@ action_groups:
- consul_session
- consul_token
proxmox:
- - proxmox
- - proxmox_disk
- - proxmox_domain_info
- - proxmox_group_info
- - proxmox_kvm
- - proxmox_nic
- - proxmox_node_info
- - proxmox_pool
- - proxmox_pool_member
- - proxmox_snap
- - proxmox_storage_contents_info
- - proxmox_storage_info
- - proxmox_tasks_info
- - proxmox_template
- - proxmox_user_info
- - proxmox_vm_info
+ - metadata:
+ extend_group:
+ - community.proxmox.proxmox
+ keycloak:
+ - keycloak_authentication
+ - keycloak_authentication_required_actions
+ - keycloak_authz_authorization_scope
+ - keycloak_authz_custom_policy
+ - keycloak_authz_permission
+ - keycloak_authz_permission_info
+ - keycloak_client
+ - keycloak_client_rolemapping
+ - keycloak_client_rolescope
+ - keycloak_clientscope
+ - keycloak_clientscope_type
+ - keycloak_clientsecret_info
+ - keycloak_clientsecret_regenerate
+ - keycloak_clienttemplate
+ - keycloak_component
+ - keycloak_component_info
+ - keycloak_group
+ - keycloak_identity_provider
+ - keycloak_realm
+ - keycloak_realm_key
+ - keycloak_realm_keys_metadata_info
+ - keycloak_realm_rolemapping
+ - keycloak_role
+ - keycloak_user
+ - keycloak_user_federation
+ - keycloak_user_rolemapping
+ - keycloak_userprofile
plugin_routing:
callback:
actionable:
@@ -44,7 +59,7 @@ plugin_routing:
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
= no' option.
hipchat:
- deprecation:
+ tombstone:
removal_version: 10.0.0
warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
osx_say:
@@ -54,16 +69,30 @@ plugin_routing:
removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_failed_stderr
= yes' option.
+ yaml:
+ deprecation:
+ removal_version: 12.0.0
+ warning_text: >-
+          The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
connection:
docker:
redirect: community.docker.docker
oc:
redirect: community.okd.oc
+ proxmox_pct_remote:
+ redirect: community.proxmox.proxmox_pct_remote
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
lookup:
gcp_storage_file:
redirect: community.google.gcp_storage_file
hashi_vault:
redirect: community.hashi_vault.hashi_vault
+ manifold:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Company was acquired in 2021 and service was ceased afterwards.
nios:
redirect: infoblox.nios_modules.nios_lookup
nios_next_ip:
@@ -71,140 +100,68 @@ plugin_routing:
nios_next_network:
redirect: infoblox.nios_modules.nios_next_network
modules:
- consul_acl:
- deprecation:
- removal_version: 10.0.0
- warning_text: Use community.general.consul_token and/or community.general.consul_policy instead.
- hipchat:
- deprecation:
- removal_version: 11.0.0
- warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
- rax_cbs_attachments:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_cbs:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_cdb_database:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_cdb_user:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_cdb:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_clb_nodes:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_clb_ssl:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_clb:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_dns_record:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_dns:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_facts:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_files_objects:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_files:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_identity:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_keypair:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_meta:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_mon_alarm:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_mon_check:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_mon_entity:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_mon_notification_plan:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_mon_notification:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_network:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_queue:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_scaling_group:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rax_scaling_policy:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on the deprecated package pyrax.
- rhn_channel:
- deprecation:
- removal_version: 10.0.0
- warning_text: RHN is EOL, please contact the community.general maintainers
- if still using this; see the module documentation for more details.
- rhn_register:
- deprecation:
- removal_version: 10.0.0
- warning_text: RHN is EOL, please contact the community.general maintainers
- if still using this; see the module documentation for more details.
- stackdriver:
- tombstone:
- removal_version: 9.0.0
- warning_text: This module relied on HTTPS APIs that do not exist anymore,
- and any new development in the direction of providing an alternative should
- happen in the context of the google.cloud collection.
ali_instance_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.ali_instance_info instead.
+ atomic_container:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
+ atomic_host:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
+ atomic_image:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
+ catapult:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details.
cisco_spark:
redirect: community.general.cisco_webex
+ clc_alert_policy:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_blueprint_package:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_firewall_policy:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_group:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_loadbalancer:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_modify_server:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_publicip:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_server:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ clc_server_snapshot:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
+ consul_acl:
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: Use community.general.consul_token and/or community.general.consul_policy instead.
docker_compose:
redirect: community.docker.docker_compose
docker_config:
@@ -259,6 +216,10 @@ plugin_routing:
redirect: community.docker.docker_volume
docker_volume_info:
redirect: community.docker.docker_volume_info
+ facter:
+ deprecation:
+ removal_version: 12.0.0
+ warning_text: Use community.general.facter_facts instead.
flowdock:
tombstone:
removal_version: 9.0.0
@@ -352,6 +313,10 @@ plugin_routing:
redirect: community.hrobot.firewall
hetzner_firewall_info:
redirect: community.hrobot.firewall_info
+ hipchat:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
hpilo_facts:
tombstone:
removal_version: 3.0.0
@@ -673,6 +638,116 @@ plugin_routing:
redirect: community.postgresql.postgresql_user
postgresql_user_obj_stat_info:
redirect: community.postgresql.postgresql_user_obj_stat_info
+ profitbricks:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
+ profitbricks_datacenter:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
+ profitbricks_nic:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
+ profitbricks_volume:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
+ profitbricks_volume_attachments:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup:
+ redirect: community.proxmox.proxmox_backup
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup_info:
+ redirect: community.proxmox.proxmox_backup_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_disk:
+ redirect: community.proxmox.proxmox_disk
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_domain_info:
+ redirect: community.proxmox.proxmox_domain_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_group_info:
+ redirect: community.proxmox.proxmox_group_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_kvm:
+ redirect: community.proxmox.proxmox_kvm
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_nic:
+ redirect: community.proxmox.proxmox_nic
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_node_info:
+ redirect: community.proxmox.proxmox_node_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool:
+ redirect: community.proxmox.proxmox_pool
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool_member:
+ redirect: community.proxmox.proxmox_pool_member
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_snap:
+ redirect: community.proxmox.proxmox_snap
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_storage_contents_info:
+ redirect: community.proxmox.proxmox_storage_contents_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_storage_info:
+ redirect: community.proxmox.proxmox_storage_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_tasks_info:
+ redirect: community.proxmox.proxmox_tasks_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_template:
+ redirect: community.proxmox.proxmox_template
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_user_info:
+ redirect: community.proxmox.proxmox_user_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_vm_info:
+ redirect: community.proxmox.proxmox_vm_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
purefa_facts:
tombstone:
removal_version: 3.0.0
@@ -685,10 +760,122 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.python_requirements_info instead.
+ rax_cbs_attachments:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cbs:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cdb_database:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cdb_user:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cdb:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_clb_nodes:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_clb_ssl:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_clb:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_dns_record:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_dns:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_facts:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_files_objects:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_files:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_identity:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_keypair:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_meta:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_alarm:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_check:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_entity:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_notification_plan:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_notification:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_network:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_queue:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_scaling_group:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_scaling_policy:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
redfish_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.redfish_info instead.
+ rhn_channel:
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: RHN is EOL.
+ rhn_register:
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: RHN is EOL.
sapcar_extract:
redirect: community.sap_libs.sapcar_extract
sap_task_list_execute:
@@ -721,6 +908,26 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_volume_info instead.
+ sensu_check:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
+ sensu_client:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
+ sensu_handler:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
+ sensu_silence:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
+ sensu_subscription:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sf_account_manager:
tombstone:
removal_version: 2.0.0
@@ -745,6 +952,16 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.smartos_image_info instead.
+ stackdriver:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore,
+ and any new development in the direction of providing an alternative should
+ happen in the context of the google.cloud collection.
+ typetalk:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: The typetalk service will be discontinued in December 2025.
vertica_facts:
tombstone:
removal_version: 3.0.0
@@ -779,11 +996,6 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use community.general.xenserver_guest_info instead.
doc_fragments:
- rackspace:
- tombstone:
- removal_version: 9.0.0
- warning_text: This doc fragment was used by rax modules, that relied on the deprecated
- package pyrax.
_gcp:
redirect: community.google._gcp
docker:
@@ -798,11 +1010,21 @@ plugin_routing:
redirect: infoblox.nios_modules.nios
postgresql:
redirect: community.postgresql.postgresql
- module_utils:
- rax:
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ purestorage:
+ deprecation:
+ removal_version: 12.0.0
+ warning_text: The modules for purestorage were removed in community.general 3.0.0, this document fragment was left behind.
+ rackspace:
tombstone:
removal_version: 9.0.0
- warning_text: This module util relied on the deprecated package pyrax.
+ warning_text: This doc fragment was used by rax modules, that relied on the deprecated
+ package pyrax.
+ module_utils:
docker.common:
redirect: community.docker.common
docker.swarm:
@@ -821,6 +1043,19 @@ plugin_routing:
redirect: infoblox.nios_modules.api
postgresql:
redirect: community.postgresql.postgresql
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ pure:
+ deprecation:
+ removal_version: 12.0.0
+ warning_text: The modules for purestorage were removed in community.general 3.0.0, this module util was left behind.
+ rax:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module util relied on the deprecated package pyrax.
remote_management.dellemc.dellemc_idrac:
redirect: dellemc.openmanage.dellemc_idrac
remote_management.dellemc.ome:
@@ -830,8 +1065,17 @@ plugin_routing:
redirect: community.docker.docker_machine
docker_swarm:
redirect: community.docker.docker_swarm
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
kubevirt:
redirect: community.kubevirt.kubevirt
+ stackpath_compute:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: The company and the service were sunset in June 2024.
filter:
path_join:
# The ansible.builtin.path_join filter has been added in ansible-base 2.10.
diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py
index 5ea55af58c..595d0ece66 100644
--- a/plugins/action/iptables_state.py
+++ b/plugins/action/iptables_state.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
import time
@@ -22,25 +21,33 @@ class ActionModule(ActionBase):
_VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
DEFAULT_SUDOABLE = True
- MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
- "This module doesn't support async>0 and poll>0 when its 'state' param "
- "is set to 'restored'. To enable its rollback feature (that needs the "
- "module to run asynchronously on the remote), please set task attribute "
- "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
- MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
- "Attempts to restore iptables state without rollback in case of mistake "
- "may lead the ansible controller to loose access to the hosts and never "
- "regain it before fixing firewall rules through a serial console, or any "
- "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
- "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
- "(recommended).")
- MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
- "You attempt to restore iptables state with rollback in case of mistake, "
- "but with settings that will lead this rollback to happen AFTER that the "
- "controller will reach its own timeout. Please set task attribute 'poll' "
- "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
+ @staticmethod
+ def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout):
+ return (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
+ f"'ansible_timeout' (={max_timeout}) (recommended).")
+
+ @staticmethod
+ def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout):
+ return (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and "
+ f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) "
+ "(recommended).")
+
+ @staticmethod
+ def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout):
+ return (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
+ f"'ansible_timeout' (={max_timeout}) (recommended).")
def _async_result(self, async_status_args, task_vars, timeout):
'''
@@ -95,18 +102,18 @@ class ActionModule(ActionBase):
if module_args.get('state', None) == 'restored':
if not wrap_async:
if not check_mode:
- display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ display.warning(self.msg_warning__no_async_is_no_rollback(
task_poll,
task_async,
max_timeout))
elif task_poll:
- raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ raise AnsibleActionFail(self.msg_error__async_and_poll_not_zero(
task_poll,
task_async,
max_timeout))
else:
if task_async > max_timeout and not check_mode:
- display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ display.warning(self.msg_warning__async_greater_than_timeout(
task_poll,
task_async,
max_timeout))
@@ -119,10 +126,10 @@ class ActionModule(ActionBase):
# remote and local sides (if not the same, make the loop
# longer on the controller); and set a backup file path.
module_args['_timeout'] = task_async
- module_args['_back'] = '%s/iptables.state' % async_dir
+ module_args['_back'] = f'{async_dir}/iptables.state'
async_status_args = dict(mode='status')
- confirm_cmd = 'rm -f %s' % module_args['_back']
- starter_cmd = 'touch %s.starter' % module_args['_back']
+ confirm_cmd = f"rm -f {module_args['_back']}"
+ starter_cmd = f"touch {module_args['_back']}.starter"
remaining_time = max(task_async, max_timeout)
# do work!
diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py
index 01201a6405..d5db878812 100644
--- a/plugins/action/shutdown.py
+++ b/plugins/action/shutdown.py
@@ -5,9 +5,8 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_native, to_text
@@ -18,6 +17,10 @@ from ansible.utils.display import Display
display = Display()
+def fmt(mapping, key):
+ return to_native(mapping[key]).strip()
+
+
class TimedOutException(Exception):
pass
@@ -84,31 +87,26 @@ class ActionModule(ActionBase):
def get_distribution(self, task_vars):
# FIXME: only execute the module if we don't already have the facts we need
distribution = {}
- display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ display.debug(f'{self._task.action}: running setup module to get distribution')
module_output = self._execute_module(
task_vars=task_vars,
module_name='ansible.legacy.setup',
module_args={'gather_subset': 'min'})
try:
if module_output.get('failed', False):
- raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
- to_native(module_output['module_stdout']).strip(),
- to_native(module_output['module_stderr']).strip()))
+ raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}")
distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
distribution['version'] = to_text(
module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
- display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ display.debug(f"{self._task.action}: distribution: {distribution}")
return distribution
except KeyError as ke:
- raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+ raise AnsibleError(f'Failed to get distribution information. Missing "{ke.args[0]}" in output.')
def get_shutdown_command(self, task_vars, distribution):
def find_command(command, find_search_paths):
- display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
- action=self._task.action,
- command=command,
- paths=find_search_paths))
+ display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"')
find_result = self._execute_module(
task_vars=task_vars,
# prevent collection search by calling with ansible.legacy (still allows library/ override of find)
@@ -130,42 +128,37 @@ class ActionModule(ActionBase):
if is_string(search_paths):
search_paths = [search_paths]
- # Error if we didn't get a list
- err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
try:
incorrect_type = any(not is_string(x) for x in search_paths)
if not isinstance(search_paths, list) or incorrect_type:
raise TypeError
except TypeError:
- raise AnsibleError(err_msg.format(search_paths))
+ # Error if we didn't get a list
+ err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}"
+ raise AnsibleError(err_msg)
full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command
if not full_path: # if we could not find the shutdown command
- display.vvv('Unable to find command "{0}" in search paths: {1}, will attempt a shutdown using systemd '
- 'directly.'.format(shutdown_bin, search_paths)) # tell the user we will try with systemd
+
+ # tell the user we will try with systemd
+ display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.')
systemctl_search_paths = ['/bin', '/usr/bin']
full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command
if not full_path: # if we couldn't find systemctl
raise AnsibleError(
- 'Could not find command "{0}" in search paths: {1} or systemctl command in search paths: {2}, unable to shutdown.'.
- format(shutdown_bin, search_paths, systemctl_search_paths)) # we give up here
+ f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl'
+ f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here
else:
- return "{0} poweroff".format(full_path[0]) # done, since we cannot use args with systemd shutdown
+ return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown
# systemd case taken care of, here we add args to the command
args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
# Convert seconds to minutes. If less that 60, set it to 0.
delay_sec = self.delay
shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
- return '{0} {1}'. \
- format(
- full_path[0],
- args.format(
- delay_sec=delay_sec,
- delay_min=delay_sec // 60,
- message=shutdown_message
- )
- )
+
+ af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+ return f'{full_path[0]} {af}'
def perform_shutdown(self, task_vars, distribution):
result = {}
@@ -174,9 +167,8 @@ class ActionModule(ActionBase):
self.cleanup(force=True)
try:
- display.vvv("{action}: shutting down server...".format(action=self._task.action))
- display.debug("{action}: shutting down server with command '{command}'".
- format(action=self._task.action, command=shutdown_command_exec))
+ display.vvv(f"{self._task.action}: shutting down server...")
+ display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'")
if self._play_context.check_mode:
shutdown_result['rc'] = 0
else:
@@ -184,16 +176,13 @@ class ActionModule(ActionBase):
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
display.debug(
- '{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action,
- error=to_text(e)))
+ f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}')
shutdown_result['rc'] = 0
if shutdown_result['rc'] != 0:
result['failed'] = True
result['shutdown'] = False
- result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
- stdout=to_native(shutdown_result['stdout'].strip()),
- stderr=to_native(shutdown_result['stderr'].strip()))
+ result['msg'] = f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}"
return result
result['failed'] = False
@@ -206,7 +195,7 @@ class ActionModule(ActionBase):
# If running with local connection, fail so we don't shutdown ourself
if self._connection.transport == 'local' and (not self._play_context.check_mode):
- msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
+ msg = f'Running {self._task.action} with local connection would shutdown the control node.'
return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
if task_vars is None:
diff --git a/plugins/become/doas.py b/plugins/become/doas.py
index 761e5e1e95..ca12faea0d 100644
--- a/plugins/become/doas.py
+++ b/plugins/become/doas.py
@@ -2,89 +2,91 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: doas
- short_description: Do As user
+DOCUMENTATION = r"""
+name: doas
+short_description: Do As user
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(doas) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: doas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_doas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DOAS_USER
+ become_exe:
+ description: C(doas) executable.
+ type: string
+ default: doas
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: doas_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_doas_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DOAS_EXE
+ become_flags:
+ description: Options to pass to C(doas).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: doas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_doas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DOAS_FLAGS
+ become_pass:
+ description: Password for C(doas) prompt.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_doas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DOAS_PASS
+ ini:
+ - section: doas_become_plugin
+ key: password
+ prompt_l10n:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the doas utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- ini:
- - section: privilege_escalation
- key: become_user
- - section: doas_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_doas_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_DOAS_USER
- become_exe:
- description: Doas executable.
- type: string
- default: doas
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: doas_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_doas_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_DOAS_EXE
- become_flags:
- description: Options to pass to doas.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: doas_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_doas_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_DOAS_FLAGS
- become_pass:
- description: Password for doas prompt.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_doas_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_DOAS_PASS
- ini:
- - section: doas_become_plugin
- key: password
- prompt_l10n:
- description:
- - List of localized strings to match for prompt detection.
- - If empty we will use the built in one.
- type: list
- elements: string
- default: []
- ini:
- - section: doas_become_plugin
- key: localized_prompts
- vars:
- - name: ansible_doas_prompt_l10n
- env:
- - name: ANSIBLE_DOAS_PROMPT_L10N
-'''
+ - List of localized strings to match for prompt detection.
+ - If empty the plugin uses the built-in one.
+ type: list
+ elements: string
+ default: []
+ ini:
+ - section: doas_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_doas_prompt_l10n
+ env:
+ - name: ANSIBLE_DOAS_PROMPT_L10N
+notes:
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
+"""
import re
@@ -100,6 +102,10 @@ class BecomeModule(BecomeBase):
fail = ('Permission denied',)
missing = ('Authorization required',)
+ # See https://github.com/ansible-collections/community.general/issues/9977,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
+
def check_password_prompt(self, b_output):
''' checks if the expected password prompt exists in b_output '''
@@ -125,9 +131,9 @@ class BecomeModule(BecomeBase):
flags += ' -n'
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
success_cmd = self._build_success_command(cmd, shell, noexe=True)
executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
- return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
+ return f'{become_exe} {flags} {user} {executable} -c {success_cmd}'
diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py
index d94c684d1f..d890bede09 100644
--- a/plugins/become/dzdo.py
+++ b/plugins/become/dzdo.py
@@ -2,75 +2,74 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dzdo
- short_description: Centrify's Direct Authorize
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- ini:
- - section: privilege_escalation
- key: become_user
- - section: dzdo_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_dzdo_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_DZDO_USER
- become_exe:
- description: Dzdo executable.
- type: string
- default: dzdo
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: dzdo_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_dzdo_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_DZDO_EXE
- become_flags:
- description: Options to pass to dzdo.
- type: string
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: dzdo_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_dzdo_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_DZDO_FLAGS
- become_pass:
- description: Options to pass to dzdo.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_dzdo_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_DZDO_PASS
- ini:
- - section: dzdo_become_plugin
- key: password
-'''
+DOCUMENTATION = r"""
+name: dzdo
+short_description: Centrify's Direct Authorize
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(dzdo) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: dzdo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_dzdo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DZDO_USER
+ become_exe:
+ description: C(dzdo) executable.
+ type: string
+ default: dzdo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: dzdo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_dzdo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DZDO_EXE
+ become_flags:
+ description: Options to pass to C(dzdo).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: dzdo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_dzdo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DZDO_FLAGS
+ become_pass:
+ description: Password for C(dzdo).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_dzdo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DZDO_PASS
+ ini:
+ - section: dzdo_become_plugin
+ key: password
+"""
from ansible.plugins.become import BecomeBase
@@ -92,10 +91,10 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
if self.get_option('become_pass'):
- self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
- flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+ self.prompt = f'[dzdo via ansible, key={self._id}] password:'
+ flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\""
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
- return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
+ return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}"
diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py
index 2be1832dc2..be56fd6128 100644
--- a/plugins/become/ksu.py
+++ b/plugins/become/ksu.py
@@ -2,90 +2,89 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: ksu
- short_description: Kerberos substitute user
+DOCUMENTATION = r"""
+name: ksu
+short_description: Kerberos substitute user
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(ksu) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: ksu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_ksu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_KSU_USER
+ required: true
+ become_exe:
+ description: C(ksu) executable.
+ type: string
+ default: ksu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: ksu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_ksu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_KSU_EXE
+ become_flags:
+ description: Options to pass to C(ksu).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: ksu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_ksu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_KSU_FLAGS
+ become_pass:
+ description: C(ksu) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_ksu_pass
+ - name: ansible_become_pass
+ - name: ansible_become_password
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_KSU_PASS
+ ini:
+ - section: ksu_become_plugin
+ key: password
+ prompt_l10n:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- ini:
- - section: privilege_escalation
- key: become_user
- - section: ksu_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_ksu_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_KSU_USER
- required: true
- become_exe:
- description: Su executable.
- type: string
- default: ksu
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: ksu_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_ksu_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_KSU_EXE
- become_flags:
- description: Options to pass to ksu.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: ksu_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_ksu_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_KSU_FLAGS
- become_pass:
- description: Ksu password.
- type: string
- required: false
- vars:
- - name: ansible_ksu_pass
- - name: ansible_become_pass
- - name: ansible_become_password
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_KSU_PASS
- ini:
- - section: ksu_become_plugin
- key: password
- prompt_l10n:
- description:
- - List of localized strings to match for prompt detection.
- - If empty we will use the built in one.
- type: list
- elements: string
- default: []
- ini:
- - section: ksu_become_plugin
- key: localized_prompts
- vars:
- - name: ansible_ksu_prompt_l10n
- env:
- - name: ANSIBLE_KSU_PROMPT_L10N
-'''
+ - List of localized strings to match for prompt detection.
+ - If empty the plugin uses the built-in one.
+ type: list
+ elements: string
+ default: []
+ ini:
+ - section: ksu_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_ksu_prompt_l10n
+ env:
+ - name: ANSIBLE_KSU_PROMPT_L10N
+"""
import re
@@ -124,4 +123,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell))
+ return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} '
diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py
index a0467c2c36..ad3daa916d 100644
--- a/plugins/become/machinectl.py
+++ b/plugins/become/machinectl.py
@@ -2,94 +2,94 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: machinectl
- short_description: Systemd's machinectl privilege escalation
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: machinectl_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_machinectl_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_MACHINECTL_USER
- become_exe:
- description: Machinectl executable.
- type: string
- default: machinectl
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: machinectl_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_machinectl_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_MACHINECTL_EXE
- become_flags:
- description: Options to pass to machinectl.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: machinectl_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_machinectl_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_MACHINECTL_FLAGS
- become_pass:
- description: Password for machinectl.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_machinectl_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_MACHINECTL_PASS
- ini:
- - section: machinectl_become_plugin
- key: password
- notes:
- - When not using this plugin with user V(root), it only works correctly with a polkit rule which will alter
- the behaviour of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials,
- if the user is allowed to perform the action (take a look at the examples section).
- If such a rule is not present the plugin only work if it is used in context with the root user,
- because then no further prompt will be shown by machinectl.
-'''
+DOCUMENTATION = r"""
+name: machinectl
+short_description: Systemd's machinectl privilege escalation
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(machinectl) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: machinectl_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_machinectl_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_MACHINECTL_USER
+ become_exe:
+ description: C(machinectl) executable.
+ type: string
+ default: machinectl
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: machinectl_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_machinectl_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_MACHINECTL_EXE
+ become_flags:
+ description: Options to pass to C(machinectl).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: machinectl_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_machinectl_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_MACHINECTL_FLAGS
+ become_pass:
+ description: Password for C(machinectl).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_machinectl_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_MACHINECTL_PASS
+ ini:
+ - section: machinectl_become_plugin
+ key: password
+notes:
+ - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour
+ of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed
+ to perform the action (take a look at the examples section). If such a rule is not present the plugin only works if it
+ is used in context with the root user, because then no further prompt is shown by C(machinectl).
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# A polkit rule needed to use the module with a non-root user.
# See the Notes section for details.
-/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |
+/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |-
polkit.addRule(function(action, subject) {
if(action.id == "org.freedesktop.machine1.host-shell" &&
subject.isInGroup("wheel")) {
return polkit.Result.AUTH_SELF_KEEP;
}
});
-'''
+"""
from re import compile as re_compile
@@ -109,6 +109,10 @@ class BecomeModule(BecomeBase):
success = ('==== AUTHENTICATION COMPLETE ====',)
require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932
+ # See https://github.com/ansible/ansible/issues/81254,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
+
@staticmethod
def remove_ansi_codes(line):
return ansi_color_codes.sub(b"", line)
@@ -123,7 +127,7 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell))
+ return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}'
def check_success(self, b_output):
b_output = self.remove_ansi_codes(b_output)
diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py
index 8a96b75797..92a49fe349 100644
--- a/plugins/become/pbrun.py
+++ b/plugins/become/pbrun.py
@@ -2,87 +2,86 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pbrun
- short_description: PowerBroker run
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: pbrun_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_pbrun_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_PBRUN_USER
- become_exe:
- description: Sudo executable.
- type: string
- default: pbrun
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pbrun_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pbrun_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PBRUN_EXE
- become_flags:
- description: Options to pass to pbrun.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pbrun_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pbrun_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PBRUN_FLAGS
- become_pass:
- description: Password for pbrun.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pbrun_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PBRUN_PASS
- ini:
- - section: pbrun_become_plugin
- key: password
- wrap_exe:
- description: Toggle to wrap the command pbrun calls in C(shell -c) or not.
- default: false
- type: bool
- ini:
- - section: pbrun_become_plugin
- key: wrap_execution
- vars:
- - name: ansible_pbrun_wrap_execution
- env:
- - name: ANSIBLE_PBRUN_WRAP_EXECUTION
-'''
+DOCUMENTATION = r"""
+name: pbrun
+short_description: PowerBroker run
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(pbrun) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pbrun_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pbrun_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PBRUN_USER
+ become_exe:
+ description: C(pbrun) executable.
+ type: string
+ default: pbrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pbrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pbrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PBRUN_EXE
+ become_flags:
+ description: Options to pass to C(pbrun).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pbrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pbrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PBRUN_FLAGS
+ become_pass:
+ description: Password for C(pbrun).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pbrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PBRUN_PASS
+ ini:
+ - section: pbrun_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command C(pbrun) calls in C(shell -c) or not.
+ default: false
+ type: bool
+ ini:
+ - section: pbrun_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pbrun_wrap_execution
+ env:
+ - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+"""
from ansible.plugins.become import BecomeBase
@@ -103,7 +102,7 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
noexe = not self.get_option('wrap_exe')
- return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
+ return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}"
diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py
index d48d622713..9faf1ffc63 100644
--- a/plugins/become/pfexec.py
+++ b/plugins/become/pfexec.py
@@ -2,92 +2,91 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pfexec
- short_description: profile based execution
+DOCUMENTATION = r"""
+name: pfexec
+short_description: Profile based execution
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(pfexec) utility.
+author: Ansible Core Team
+options:
+ become_user:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
- author: Ansible Core Team
- options:
- become_user:
- description:
- - User you 'become' to execute the task.
- - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out,
- but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
- type: string
- default: root
- ini:
- - section: privilege_escalation
- key: become_user
- - section: pfexec_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_pfexec_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_PFEXEC_USER
- become_exe:
- description: Sudo executable.
- type: string
- default: pfexec
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pfexec_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pfexec_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PFEXEC_EXE
- become_flags:
- description: Options to pass to pfexec.
- type: string
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pfexec_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pfexec_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PFEXEC_FLAGS
- become_pass:
- description: pfexec password.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pfexec_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PFEXEC_PASS
- ini:
- - section: pfexec_become_plugin
- key: password
- wrap_exe:
- description: Toggle to wrap the command pfexec calls in C(shell -c) or not.
- default: false
- type: bool
- ini:
- - section: pfexec_become_plugin
- key: wrap_execution
- vars:
- - name: ansible_pfexec_wrap_execution
- env:
- - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
- notes:
- - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out.
-'''
+ - User you 'become' to execute the task.
+ - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for
+ Ansible to make decisions needed for the task execution, like file permissions.
+ type: string
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pfexec_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pfexec_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PFEXEC_USER
+ become_exe:
+ description: C(pfexec) executable.
+ type: string
+ default: pfexec
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pfexec_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pfexec_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PFEXEC_EXE
+ become_flags:
+ description: Options to pass to C(pfexec).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pfexec_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pfexec_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PFEXEC_FLAGS
+ become_pass:
+ description: C(pfexec) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pfexec_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PFEXEC_PASS
+ ini:
+ - section: pfexec_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command C(pfexec) calls in C(shell -c) or not.
+ default: false
+ type: bool
+ ini:
+ - section: pfexec_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pfexec_wrap_execution
+ env:
+ - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
+notes:
+ - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out.
+"""
from ansible.plugins.become import BecomeBase
@@ -106,4 +105,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
noexe = not self.get_option('wrap_exe')
- return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
+ return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}'
diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py
index 908c5e759d..a2432d92ee 100644
--- a/plugins/become/pmrun.py
+++ b/plugins/become/pmrun.py
@@ -2,63 +2,62 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pmrun
- short_description: Privilege Manager run
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
- author: Ansible Core Team
- options:
- become_exe:
- description: Sudo executable
- type: string
- default: pmrun
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pmrun_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pmrun_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PMRUN_EXE
- become_flags:
- description: Options to pass to pmrun.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pmrun_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pmrun_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PMRUN_FLAGS
- become_pass:
- description: pmrun password.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pmrun_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PMRUN_PASS
- ini:
- - section: pmrun_become_plugin
- key: password
- notes:
- - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user.
-'''
+DOCUMENTATION = r"""
+name: pmrun
+short_description: Privilege Manager run
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(pmrun) utility.
+author: Ansible Core Team
+options:
+ become_exe:
+ description: C(pmrun) executable.
+ type: string
+ default: pmrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pmrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pmrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PMRUN_EXE
+ become_flags:
+ description: Options to pass to C(pmrun).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pmrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pmrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PMRUN_FLAGS
+ become_pass:
+ description: C(pmrun) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pmrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PMRUN_PASS
+ ini:
+ - section: pmrun_become_plugin
+ key: password
+notes:
+ - This plugin ignores the C(become_user) supplied and uses C(pmrun)'s own configuration to select the user.
+"""
from ansible.plugins.become import BecomeBase
from ansible.module_utils.six.moves import shlex_quote
@@ -78,4 +77,4 @@ class BecomeModule(BecomeBase):
become = self.get_option('become_exe')
flags = self.get_option('become_flags')
- return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell)))
+ return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}'
diff --git a/plugins/become/run0.py b/plugins/become/run0.py
index a718e86f24..dce7c22448 100644
--- a/plugins/become/run0.py
+++ b/plugins/become/run0.py
@@ -3,72 +3,71 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = """
- name: run0
- short_description: Systemd's run0
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the C(run0) utility.
- author:
- - Thomas Sjögren (@konstruktoid)
- version_added: '9.0.0'
- options:
- become_user:
- description: User you 'become' to execute the task.
- default: root
- ini:
- - section: privilege_escalation
- key: become_user
- - section: run0_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_run0_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_RUN0_USER
- type: string
- become_exe:
- description: The C(run0) executable.
- default: run0
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: run0_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_run0_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_RUN0_EXE
- type: string
- become_flags:
- description: Options to pass to run0.
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: run0_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_run0_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_RUN0_FLAGS
- type: string
- notes:
- - This plugin will only work when a polkit rule is in place.
+DOCUMENTATION = r"""
+name: run0
+short_description: Systemd's run0
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(run0) utility.
+author:
+ - Thomas Sjögren (@konstruktoid)
+version_added: '9.0.0'
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: run0_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_run0_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_RUN0_USER
+ type: string
+ become_exe:
+ description: C(run0) executable.
+ default: run0
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: run0_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_run0_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_RUN0_EXE
+ type: string
+ become_flags:
+ description: Options to pass to C(run0).
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: run0_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_run0_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_RUN0_FLAGS
+ type: string
+notes:
+ - This plugin only works when a C(polkit) rule is in place.
"""
EXAMPLES = r"""
# An example polkit rule that allows the user 'ansible' in the 'wheel' group
# to execute commands using run0 without authentication.
-/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |-
polkit.addRule(function(action, subject) {
if(action.id == "org.freedesktop.systemd1.manage-units" &&
subject.isInGroup("wheel") &&
diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py
index 4dcb837e70..cf921e2e47 100644
--- a/plugins/become/sesu.py
+++ b/plugins/become/sesu.py
@@ -2,76 +2,75 @@
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: sesu
- short_description: CA Privileged Access Manager
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
- author: ansible (@nekonyuu)
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: sesu_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_sesu_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_SESU_USER
- become_exe:
- description: sesu executable.
- type: string
- default: sesu
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: sesu_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_sesu_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_SESU_EXE
- become_flags:
- description: Options to pass to sesu.
- type: string
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: sesu_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_sesu_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_SESU_FLAGS
- become_pass:
- description: Password to pass to sesu.
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_sesu_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_SESU_PASS
- ini:
- - section: sesu_become_plugin
- key: password
-'''
+DOCUMENTATION = r"""
+name: sesu
+short_description: CA Privileged Access Manager
+description:
+  - This become plugin allows your remote/login user to execute commands as another user using the C(sesu) utility.
+author: ansible (@nekonyuu)
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sesu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sesu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SESU_USER
+ become_exe:
+ description: C(sesu) executable.
+ type: string
+ default: sesu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sesu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sesu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SESU_EXE
+ become_flags:
+ description: Options to pass to C(sesu).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sesu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sesu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SESU_FLAGS
+ become_pass:
+ description: Password to pass to C(sesu).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sesu_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SESU_PASS
+ ini:
+ - section: sesu_become_plugin
+ key: password
+"""
from ansible.plugins.become import BecomeBase
@@ -93,4 +92,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell))
+ return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}'
diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py
index 5454fd2316..509b2725df 100644
--- a/plugins/become/sudosu.py
+++ b/plugins/become/sudosu.py
@@ -2,77 +2,77 @@
# Copyright (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = """
- name: sudosu
- short_description: Run tasks using sudo su -
+DOCUMENTATION = r"""
+name: sudosu
+short_description: Run tasks using sudo su -
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(sudo) and C(su) utilities
+ combined.
+author:
+ - Dag Wieers (@dagwieers)
+version_added: 2.4.0
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sudo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sudo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SUDO_USER
+ become_flags:
+ description: Options to pass to C(sudo).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sudo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sudo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SUDO_FLAGS
+ become_pass:
+ description: Password to pass to C(sudo).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sudo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SUDO_PASS
+ ini:
+ - section: sudo_become_plugin
+ key: password
+ alt_method:
description:
- - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
- author:
- - Dag Wieers (@dagwieers)
- version_added: 2.4.0
- options:
- become_user:
- description: User you 'become' to execute the task.
- type: string
- default: root
- ini:
- - section: privilege_escalation
- key: become_user
- - section: sudo_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_sudo_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_SUDO_USER
- become_flags:
- description: Options to pass to C(sudo).
- type: string
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: sudo_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_sudo_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_SUDO_FLAGS
- become_pass:
- description: Password to pass to C(sudo).
- type: string
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_sudo_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_SUDO_PASS
- ini:
- - section: sudo_become_plugin
- key: password
- alt_method:
- description:
- - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command),
- it runs C(su -l user -c command).
- - Use this when the default one is not working on your system.
- required: false
- type: boolean
- ini:
- - section: community.general.sudosu
- key: alternative_method
- vars:
- - name: ansible_sudosu_alt_method
- env:
- - name: ANSIBLE_SUDOSU_ALT_METHOD
- version_added: 9.2.0
+ - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), it
+ runs C(su -l user -c command).
+ - Use this when the default one is not working on your system.
+ required: false
+ type: boolean
+ ini:
+ - section: community.general.sudosu
+ key: alternative_method
+ vars:
+ - name: ansible_sudosu_alt_method
+ env:
+ - name: ANSIBLE_SUDOSU_ALT_METHOD
+ version_added: 9.2.0
"""
@@ -98,16 +98,16 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags') or ''
prompt = ''
if self.get_option('become_pass'):
- self.prompt = '[sudo via ansible, key=%s] password:' % self._id
+ self.prompt = f'[sudo via ansible, key={self._id}] password:'
if flags: # this could be simplified, but kept as is for now for backwards string matching
flags = flags.replace('-n', '')
- prompt = '-p "%s"' % (self.prompt)
+ prompt = f'-p "{self.prompt}"'
user = self.get_option('become_user') or ''
if user:
- user = '%s' % (user)
+ user = f'{user}'
if self.get_option('alt_method'):
- return ' '.join([becomecmd, flags, prompt, "su -l", user, "-c", self._build_success_command(cmd, shell, True)])
+ return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}"
else:
- return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
+ return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}"
diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py
index 5e066d626c..9c4fbec595 100644
--- a/plugins/cache/memcached.py
+++ b/plugins/cache/memcached.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -191,7 +190,7 @@ class CacheModule(BaseCacheModule):
self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
- return "{0}{1}".format(self._prefix, key)
+ return f"{self._prefix}{key}"
def _expire_keys(self):
if self._timeout > 0:
diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py
index 1fb756c873..1e9ffcb264 100644
--- a/plugins/cache/pickle.py
+++ b/plugins/cache/pickle.py
@@ -5,8 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: pickle
@@ -18,7 +17,7 @@ options:
_uri:
required: true
description:
- - Path in which the cache plugin will save the files.
+ - Path in which the cache plugin saves the files.
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py
index 6ceda73910..41f69d659f 100644
--- a/plugins/cache/redis.py
+++ b/plugins/cache/redis.py
@@ -3,8 +3,7 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -73,7 +72,6 @@ import time
import json
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseCacheModule
from ansible.utils.display import Display
@@ -131,7 +129,7 @@ class CacheModule(BaseCacheModule):
connection = self._parse_connection(self.re_url_conn, uri)
self._db = StrictRedis(*connection, **kw)
- display.vv('Redis connection: %s' % self._db)
+ display.vv(f'Redis connection: {self._db}')
@staticmethod
def _parse_connection(re_patt, uri):
@@ -164,12 +162,12 @@ class CacheModule(BaseCacheModule):
pass # password is optional
sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
- display.vv('\nUsing redis sentinels: %s' % sentinels)
+ display.vv(f'\nUsing redis sentinels: {sentinels}')
scon = Sentinel(sentinels, **kw)
try:
return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
except Exception as exc:
- raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+ raise AnsibleError(f'Could not connect to redis sentinel: {exc}')
def _make_key(self, key):
return self._prefix + key
diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py
index 88cdad2acb..8bf61f6898 100644
--- a/plugins/cache/yaml.py
+++ b/plugins/cache/yaml.py
@@ -5,8 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: yaml
@@ -18,7 +17,7 @@ options:
_uri:
required: true
description:
- - Path in which the cache plugin will save the files.
+ - Path in which the cache plugin saves the files.
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py
index d33bc091d1..b4099eae49 100644
--- a/plugins/callback/cgroup_memory_recap.py
+++ b/plugins/callback/cgroup_memory_recap.py
@@ -4,8 +4,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -115,7 +114,7 @@ class CallbackModule(CallbackBase):
max_results = int(f.read().strip()) / 1024 / 1024
self._display.banner('CGROUP MEMORY RECAP')
- self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results)
+ self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n')
for task, memory in self.task_results:
- self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory))
+ self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB')
diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py
index 335a765340..e846aa2786 100644
--- a/plugins/callback/context_demo.py
+++ b/plugins/callback/context_demo.py
@@ -4,14 +4,13 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: context_demo
type: aggregate
-short_description: demo callback that adds play/task context
+short_description: Demo callback that adds play/task context
description:
- Displays some play and task context along with normal output.
- This is mostly for demo purposes.
@@ -38,15 +37,15 @@ class CallbackModule(CallbackBase):
self.play = None
def v2_on_any(self, *args, **kwargs):
- self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+ self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---")
self._display.display(" --- ARGS ")
for i, a in enumerate(args):
- self._display.display(' %s: %s' % (i, a))
+ self._display.display(f' {i}: {a}')
self._display.display(" --- KWARGS ")
for k in kwargs:
- self._display.display(' %s: %s' % (k, kwargs[k]))
+ self._display.display(f' {k}: {kwargs[k]}')
def v2_playbook_on_play_start(self, play):
self.play = play
diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py
index b441ae97f5..2377d46585 100644
--- a/plugins/callback/counter_enabled.py
+++ b/plugins/callback/counter_enabled.py
@@ -6,18 +6,17 @@
Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
'''
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: counter_enabled
type: stdout
-short_description: adds counters to the output items (tasks and hosts/task)
+short_description: Adds counters to the output items (tasks and hosts/task)
description:
- Use this callback when you need a kind of progress bar on a large environments.
- - You will know how many tasks has the playbook to run, and which one is actually running.
- - You will know how many hosts may run a task, and which of them is actually running.
+ - You can see how many tasks has the playbook to run, and which one is actually running.
+ - You can see how many hosts may run a task, and which of them is actually running.
extends_documentation_fragment:
- default_callback
requirements:
@@ -71,7 +70,7 @@ class CallbackModule(CallbackBase):
if not name:
msg = u"play"
else:
- msg = u"PLAY [%s]" % name
+ msg = f"PLAY [{name}]"
self._play = play
@@ -91,25 +90,17 @@ class CallbackModule(CallbackBase):
for host in hosts:
stat = stats.summarize(host)
- self._display.display(u"%s : %s %s %s %s %s %s" % (
- hostcolor(host, stat),
- colorize(u'ok', stat['ok'], C.COLOR_OK),
- colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', stat['failures'], C.COLOR_ERROR),
- colorize(u'rescued', stat['rescued'], C.COLOR_OK),
- colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+ self._display.display(
+ f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', stat['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}",
screen_only=True
)
- self._display.display(u"%s : %s %s %s %s %s %s" % (
- hostcolor(host, stat, False),
- colorize(u'ok', stat['ok'], None),
- colorize(u'changed', stat['changed'], None),
- colorize(u'unreachable', stat['unreachable'], None),
- colorize(u'failed', stat['failures'], None),
- colorize(u'rescued', stat['rescued'], None),
- colorize(u'ignored', stat['ignored'], None)),
+ self._display.display(
+ f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} "
+ f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} "
+ f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', stat['ignored'], None)}",
log_only=True
)
@@ -124,12 +115,14 @@ class CallbackModule(CallbackBase):
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
- self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+ _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '')
+ self._display.display(f'\t{k}: {_custom_stats}')
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
- self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')
+ self._display.display(f'\tRUN: {_custom_stats_run}')
self._display.display("", screen_only=True)
def v2_playbook_on_task_start(self, task, is_conditional):
@@ -143,13 +136,13 @@ class CallbackModule(CallbackBase):
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
- args = ', '.join(('%s=%s' % a for a in task.args.items()))
- args = ' %s' % args
- self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+ args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
+ args = f' {args}'
+ self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]")
if self._display.verbosity >= 2:
path = task.get_path()
if path:
- self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+ self._display.display(f"task path: {path}", color=C.COLOR_DEBUG)
self._host_counter = self._previous_batch_total
self._task_counter += 1
@@ -166,15 +159,15 @@ class CallbackModule(CallbackBase):
return
elif result._result.get('changed', False):
if delegated_vars:
- msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
else:
- msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
color = C.COLOR_CHANGED
else:
if delegated_vars:
- msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
else:
- msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
color = C.COLOR_OK
self._handle_warnings(result._result)
@@ -185,7 +178,7 @@ class CallbackModule(CallbackBase):
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
- msg += " => %s" % (self._dump_results(result._result),)
+ msg += f" => {self._dump_results(result._result)}"
self._display.display(msg, color=color)
def v2_runner_on_failed(self, result, ignore_errors=False):
@@ -206,14 +199,16 @@ class CallbackModule(CallbackBase):
else:
if delegated_vars:
- self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), delegated_vars['ansible_host'],
- self._dump_results(result._result)),
- color=C.COLOR_ERROR)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> "
+ f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}",
+ color=C.COLOR_ERROR
+ )
else:
- self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), self._dump_results(result._result)),
- color=C.COLOR_ERROR)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! => {self._dump_results(result._result)}",
+ color=C.COLOR_ERROR
+ )
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
@@ -231,9 +226,9 @@ class CallbackModule(CallbackBase):
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
- msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
if self._run_is_verbose(result):
- msg += " => %s" % self._dump_results(result._result)
+ msg += f" => {self._dump_results(result._result)}"
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
@@ -244,11 +239,13 @@ class CallbackModule(CallbackBase):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), delegated_vars['ansible_host'],
- self._dump_results(result._result)),
- color=C.COLOR_UNREACHABLE)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> "
+ f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}",
+ color=C.COLOR_UNREACHABLE
+ )
else:
- self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), self._dump_results(result._result)),
- color=C.COLOR_UNREACHABLE)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! => {self._dump_results(result._result)}",
+ color=C.COLOR_UNREACHABLE
+ )
diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py
index 8f300d8e4f..3ea55100bf 100644
--- a/plugins/callback/default_without_diff.py
+++ b/plugins/callback/default_without_diff.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: default_without_diff
@@ -29,7 +28,7 @@ ansible_config: |
stdout_callback = community.general.default_without_diff
# Enable callback with environment variables:
-environment_variable: |
+environment_variable: |-
ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff
"""
diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py
index 5757d5115c..1fd68b5d60 100644
--- a/plugins/callback/dense.py
+++ b/plugins/callback/dense.py
@@ -4,17 +4,16 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: dense
type: stdout
-short_description: minimal stdout output
+short_description: Minimal stdout output
extends_documentation_fragment:
- default_callback
description:
- - When in verbose mode it will act the same as the default callback.
+ - When in verbose mode it acts the same as the default callback.
author:
- Dag Wieers (@dagwieers)
requirements:
@@ -195,7 +194,7 @@ class CallbackModule(CallbackModule_default):
self.disabled = True
def __del__(self):
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
def _add_host(self, result, status):
name = result._host.get_name()
@@ -243,7 +242,7 @@ class CallbackModule(CallbackModule_default):
def _handle_exceptions(self, result):
if 'exception' in result:
- # Remove the exception from the result so it's not shown every time
+ # Remove the exception from the result so it is not shown every time
del result['exception']
if self._display.verbosity == 1:
@@ -252,7 +251,7 @@ class CallbackModule(CallbackModule_default):
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
- sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}:')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
@@ -260,22 +259,18 @@ class CallbackModule(CallbackModule_default):
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
- sys.stdout.write(self.hosts[name]['delegate'] + '>')
+ sys.stdout.write(f"{self.hosts[name]['delegate']}>")
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
-# if result._result.get('diff', False):
-# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
-# self.keep = True
-
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
- sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}')
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
@@ -284,7 +279,7 @@ class CallbackModule(CallbackModule_default):
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
@@ -309,16 +304,16 @@ class CallbackModule(CallbackModule_default):
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
- sys.stdout.write(colors[status] + status + ': ')
+ sys.stdout.write(f"{colors[status] + status}: ")
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
+ sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}")
else:
sys.stdout.write(result._host.get_name())
- sys.stdout.write(': ' + dump + '\n')
- sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.write(f": {dump}\n")
+ sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}")
sys.stdout.flush()
if status == 'changed':
@@ -327,7 +322,7 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
@@ -341,14 +336,14 @@ class CallbackModule(CallbackModule_default):
name = play.get_name().strip()
if not name:
name = 'unnamed'
- sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}")
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}")
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
@@ -365,14 +360,14 @@ class CallbackModule(CallbackModule_default):
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
- sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}.')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
@@ -388,7 +383,7 @@ class CallbackModule(CallbackModule_default):
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
- sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}.')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
@@ -451,13 +446,13 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
- sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT")
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
@@ -465,7 +460,7 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
@@ -476,22 +471,16 @@ class CallbackModule(CallbackModule_default):
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(
- u"%s : %s %s %s %s %s %s" % (
- hostcolor(h, t),
- colorize(u'ok', t['ok'], C.COLOR_OK),
- colorize(u'changed', t['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', t['failures'], C.COLOR_ERROR),
- colorize(u'rescued', t['rescued'], C.COLOR_OK),
- colorize(u'ignored', t['ignored'], C.COLOR_WARN),
- ),
+ f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}",
screen_only=True
)
diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py
index c27825010e..f84789d010 100644
--- a/plugins/callback/diy.py
+++ b/plugins/callback/diy.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: diy
@@ -24,15 +23,15 @@ notes:
that is available using the other various execution contexts, such as playbook, play, task, and so on so forth.
- Options being set by their respective variable input can only be set using the variable if the variable was set in a context
that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a
- callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output the top level variable names available
+ callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available
to the callback.
- Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For
- example, C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
- - 'B(Condition) for all C(msg) options: if value C(is None or omit), then the option is not being used. B(Effect): use
- of the C(default) callback plugin for output.'
- - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is not greater than 0), then the
+ example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
+ - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of
+ the C(default) callback plugin for output.'
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the
option is being used without output. B(Effect): suppress output.'
- - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is greater than 0), then the option
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option
is being used with output. B(Effect): render value as template and output.'
- 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green),
V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta),
@@ -829,9 +828,9 @@ class CallbackModule(Default):
_callback_options = ['msg', 'msg_color']
for option in _callback_options:
- _option_name = '%s_%s' % (_callback_type, option)
+ _option_name = f'{_callback_type}_{option}'
_option_template = variables.get(
- self.DIY_NS + "_" + _option_name,
+ f"{self.DIY_NS}_{_option_name}",
self.get_option(_option_name)
)
_ret.update({option: self._template(
@@ -871,7 +870,7 @@ class CallbackModule(Default):
handler=None, result=None, stats=None, remove_attr_ref_loop=True):
def _get_value(obj, attr=None, method=None):
if attr:
- return getattr(obj, attr, getattr(obj, "_" + attr, None))
+ return getattr(obj, attr, getattr(obj, f"_{attr}", None))
if method:
_method = getattr(obj, method)
diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py
index b9aa6adf94..a4b0974f0b 100644
--- a/plugins/callback/elastic.py
+++ b/plugins/callback/elastic.py
@@ -2,8 +2,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Victor Martinez (@v1v)
@@ -88,6 +87,7 @@ from contextlib import closing
from os.path import basename
from ansible.errors import AnsibleError, AnsibleRuntimeError
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase
@@ -118,7 +118,7 @@ class TaskData:
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
- host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
return
@@ -141,7 +141,6 @@ class HostData:
class ElasticSource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -166,7 +165,7 @@ class ElasticSource(object):
args = None
if not task.no_log and not hide_task_arguments:
- args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
@@ -184,9 +183,6 @@ class ElasticSource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
task.add_host(HostData(host_uuid, host_name, status, result))
def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
@@ -210,8 +206,7 @@ class ElasticSource(object):
else:
apm_cli.begin_transaction("Session", start=parent_start_time)
# Populate trace metadata attributes
- if self.ansible_version is not None:
- label(ansible_version=self.ansible_version)
+ label(ansible_version=ansible_version)
label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
if self.ip_address is not None:
label(ansible_host_ip=self.ip_address)
@@ -225,7 +220,7 @@ class ElasticSource(object):
def create_span_data(self, apm_cli, task_data, host_data):
""" create the span with the given TaskData and HostData """
- name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ name = f'[{host_data.name}] {task_data.play}: {task_data.name}'
message = "success"
status = "success"
@@ -259,7 +254,7 @@ class ElasticSource(object):
"ansible.task.host.status": host_data.status}) as span:
span.outcome = status
if 'failure' in status:
- exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
+ exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}")
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
@@ -288,7 +283,7 @@ class ElasticSource(object):
message = result.get('msg', 'failed')
exception = result.get('exception')
stderr = result.get('stderr')
- return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+ return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\""
class CallbackModule(CallbackBase):
diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py
deleted file mode 100644
index bf0d425303..0000000000
--- a/plugins/callback/hipchat.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2014, Matt Martz
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: hipchat
- type: notification
- requirements:
- - whitelist in configuration.
- - prettytable (python lib)
- short_description: post task events to hipchat
- description:
- - This callback plugin sends status updates to a HipChat channel during playbook execution.
- - Before 2.4 only environment variables were available for configuring this plugin.
- deprecated:
- removed_in: 10.0.0
- why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
- alternative: There is none.
- options:
- token:
- description: HipChat API token for v1 or v2 API.
- type: str
- required: true
- env:
- - name: HIPCHAT_TOKEN
- ini:
- - section: callback_hipchat
- key: token
- api_version:
- description: HipChat API version, v1 or v2.
- type: str
- choices:
- - v1
- - v2
- required: false
- default: v1
- env:
- - name: HIPCHAT_API_VERSION
- ini:
- - section: callback_hipchat
- key: api_version
- room:
- description: HipChat room to post in.
- type: str
- default: ansible
- env:
- - name: HIPCHAT_ROOM
- ini:
- - section: callback_hipchat
- key: room
- from:
- description: Name to post as
- type: str
- default: ansible
- env:
- - name: HIPCHAT_FROM
- ini:
- - section: callback_hipchat
- key: from
- notify:
- description: Add notify flag to important messages
- type: bool
- default: true
- env:
- - name: HIPCHAT_NOTIFY
- ini:
- - section: callback_hipchat
- key: notify
-
-'''
-
-import os
-import json
-
-try:
- import prettytable
- HAS_PRETTYTABLE = True
-except ImportError:
- HAS_PRETTYTABLE = False
-
-from ansible.plugins.callback import CallbackBase
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url
-
-
-class CallbackModule(CallbackBase):
- """This is an example ansible callback plugin that sends status
- updates to a HipChat channel during playbook execution.
- """
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.hipchat'
- CALLBACK_NEEDS_WHITELIST = True
-
- API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
- API_V2_URL = 'https://api.hipchat.com/v2/'
-
- def __init__(self):
-
- super(CallbackModule, self).__init__()
-
- if not HAS_PRETTYTABLE:
- self.disabled = True
- self._display.warning('The `prettytable` python module is not installed. '
- 'Disabling the HipChat callback plugin.')
- self.printed_playbook = False
- self.playbook_name = None
- self.play = None
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- self.token = self.get_option('token')
- self.api_version = self.get_option('api_version')
- self.from_name = self.get_option('from')
- self.allow_notify = self.get_option('notify')
- self.room = self.get_option('room')
-
- if self.token is None:
- self.disabled = True
- self._display.warning('HipChat token could not be loaded. The HipChat '
- 'token can be provided using the `HIPCHAT_TOKEN` '
- 'environment variable.')
-
- # Pick the request handler.
- if self.api_version == 'v2':
- self.send_msg = self.send_msg_v2
- else:
- self.send_msg = self.send_msg_v1
-
- def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
-
- body = {}
- body['room_id'] = self.room
- body['from'] = self.from_name[:15] # max length is 15
- body['message'] = msg
- body['message_format'] = msg_format
- body['color'] = color
- body['notify'] = self.allow_notify and notify
-
- data = json.dumps(body)
- url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
- try:
- response = open_url(url, data=data, headers=headers, method='POST')
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- params = {}
- params['room_id'] = self.room
- params['from'] = self.from_name[:15] # max length is 15
- params['message'] = msg
- params['message_format'] = msg_format
- params['color'] = color
- params['notify'] = int(self.allow_notify and notify)
-
- url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
- try:
- response = open_url(url, data=urlencode(params))
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def v2_playbook_on_play_start(self, play):
- """Display Playbook and play start messages"""
-
- self.play = play
- name = play.name
- # This block sends information about a playbook when it starts
- # The playbook object is not immediately available at
- # playbook_on_start so we grab it via the play
- #
- # Displays info about playbook being started by a person on an
- # inventory, as well as Tags, Skip Tags and Limits
- if not self.printed_playbook:
- self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
- host_list = self.play.playbook.inventory.host_list
- inventory = os.path.basename(os.path.realpath(host_list))
- self.send_msg("%s: Playbook initiated by %s against %s" %
- (self.playbook_name,
- self.play.playbook.remote_user,
- inventory), notify=True)
- self.printed_playbook = True
- subset = self.play.playbook.inventory._subset
- skip_tags = self.play.playbook.skip_tags
- self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
- (self.playbook_name,
- ', '.join(self.play.playbook.only_tags),
- ', '.join(skip_tags) if skip_tags else None,
- ', '.join(subset) if subset else subset))
-
- # This is where we actually say we are starting a play
- self.send_msg("%s: Starting play: %s" %
- (self.playbook_name, name))
-
- def playbook_on_stats(self, stats):
- """Display info about playbook statistics"""
- hosts = sorted(stats.processed.keys())
-
- t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
- 'Failures'])
-
- failures = False
- unreachable = False
-
- for h in hosts:
- s = stats.summarize(h)
-
- if s['failures'] > 0:
- failures = True
- if s['unreachable'] > 0:
- unreachable = True
-
- t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
- 'failures']])
-
- self.send_msg("%s: Playbook complete" % self.playbook_name,
- notify=True)
-
- if failures or unreachable:
- color = 'red'
- self.send_msg("%s: Failures detected" % self.playbook_name,
- color=color, notify=True)
- else:
- color = 'green'
-
- self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py
index 6cd94d03b7..c5a0881e14 100644
--- a/plugins/callback/jabber.py
+++ b/plugins/callback/jabber.py
@@ -4,14 +4,13 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: jabber
type: notification
-short_description: post task events to a Jabber server
+short_description: Post task events to a Jabber server
description:
- The chatty part of ChatOps with a Hipchat server as a target.
- This callback plugin sends status updates to a HipChat channel during playbook execution.
@@ -37,7 +36,7 @@ options:
env:
- name: JABBER_PASS
to:
- description: Chat identifier that will receive the message.
+ description: Chat identifier that receives the message.
type: str
required: true
env:
@@ -102,7 +101,7 @@ class CallbackModule(CallbackBase):
"""Display Playbook and play start messages"""
self.play = play
name = play.name
- self.send_msg("Ansible starting play: %s" % (name))
+ self.send_msg(f"Ansible starting play: {name}")
def playbook_on_stats(self, stats):
name = self.play
@@ -118,7 +117,7 @@ class CallbackModule(CallbackBase):
if failures or unreachable:
out = self.debug
- self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
+ self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}")
else:
out = self.debug
- self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
+ self.send_msg(f"Great! \n Playbook {name} completed:\n{s} \n Last task debug:\n {out}")
diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py
index 0b658717f6..3de6c0bec0 100644
--- a/plugins/callback/log_plays.py
+++ b/plugins/callback/log_plays.py
@@ -4,14 +4,13 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: log_plays
type: notification
-short_description: write playbook output to log file
+short_description: Write playbook output to log file
description:
- This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
requirements:
@@ -20,7 +19,7 @@ requirements:
options:
log_folder:
default: /var/log/ansible/hosts
- description: The folder where log files will be created.
+ description: The folder where log files are created.
type: str
env:
- name: ANSIBLE_LOG_FOLDER
@@ -57,7 +56,10 @@ class CallbackModule(CallbackBase):
CALLBACK_NEEDS_WHITELIST = True
TIME_FORMAT = "%b %d %Y %H:%M:%S"
- MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n"
+
+ @staticmethod
+ def _make_msg(now, playbook, task_name, task_action, category, data):
+ return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n"
def __init__(self):
@@ -82,22 +84,12 @@ class CallbackModule(CallbackBase):
invocation = data.pop('invocation', None)
data = json.dumps(data, cls=AnsibleJSONEncoder)
if invocation is not None:
- data = json.dumps(invocation) + " => %s " % data
+ data = f"{json.dumps(invocation)} => {data} "
path = os.path.join(self.log_folder, result._host.get_name())
now = time.strftime(self.TIME_FORMAT, time.localtime())
- msg = to_bytes(
- self.MSG_FORMAT
- % dict(
- now=now,
- playbook=self.playbook,
- task_name=result._task.name,
- task_action=result._task.action,
- category=category,
- data=data,
- )
- )
+ msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data))
with open(path, "ab") as fd:
fd.write(msg)
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py
index 9411dc8c0d..bd6b89fde1 100644
--- a/plugins/callback/loganalytics.py
+++ b/plugins/callback/loganalytics.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: loganalytics
@@ -12,7 +11,7 @@ type: notification
short_description: Posts task results to Azure Log Analytics
author: "Cyrus Li (@zhcli) "
description:
- - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
+ - This callback plugin posts task results in JSON formatted to an Azure Log Analytics workspace.
- Credits to authors of splunk callback plugin.
version_added: "2.4.0"
requirements:
@@ -63,6 +62,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -76,7 +76,6 @@ class AzureLogAnalyticsSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.user = getpass.getuser()
@@ -84,18 +83,17 @@ class AzureLogAnalyticsSource(object):
def __build_signature(self, date, workspace_id, shared_key, content_length):
# Build authorisation signature for Azure log analytics API call
- sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
- str(content_length), date)
+ sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs"
utf8_sigs = sigs.encode('utf-8')
decoded_shared_key = base64.b64decode(shared_key)
hmac_sha256_sigs = hmac.new(
decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
- signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
+ signature = f"SharedKey {workspace_id}:{encoded_hash}"
return signature
def __build_workspace_url(self, workspace_id):
- return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)
+ return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01"
def __rfc1123date(self):
return now().strftime('%a, %d %b %Y %H:%M:%S GMT')
@@ -104,10 +102,6 @@ class AzureLogAnalyticsSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -121,7 +115,7 @@ class AzureLogAnalyticsSource(object):
data['host'] = self.host
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py
index f5cfb4800c..9ceb6547b2 100644
--- a/plugins/callback/logdna.py
+++ b/plugins/callback/logdna.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -12,7 +11,7 @@ name: logdna
type: notification
short_description: Sends playbook logs to LogDNA
description:
- - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
+ - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
requirements:
- LogDNA Python Library (U(https://github.com/logdna/python))
- whitelisting in configuration
@@ -73,7 +72,7 @@ except ImportError:
# Getting MAC Address of system:
def get_mac():
- mac = "%012x" % getnode()
+ mac = f"{getnode():012x}"
return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2))))
@@ -161,7 +160,7 @@ class CallbackModule(CallbackBase):
if ninvalidKeys > 0:
for key in invalidKeys:
del meta[key]
- meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys)
+ meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}"
return meta
def sanitizeJSON(self, data):
diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py
index a7adfbf3aa..796398d6b6 100644
--- a/plugins/callback/logentries.py
+++ b/plugins/callback/logentries.py
@@ -3,8 +3,7 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -12,7 +11,7 @@ name: logentries
type: notification
short_description: Sends events to Logentries
description:
- - This callback plugin will generate JSON objects and send them to Logentries using TCP for auditing/debugging purposes.
+ - This callback plugin generates JSON objects and send them to Logentries using TCP for auditing/debugging purposes.
requirements:
- whitelisting in configuration
- certifi (Python library)
@@ -133,7 +132,7 @@ class PlainTextSocketAppender(object):
# Error message displayed when an incorrect Token has been detected
self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n"
# Unicode Line separator character \u2028
- self.LINE_SEP = u'\u2028'
+ self.LINE_SEP = '\u2028'
self._display = display
self._conn = None
@@ -151,7 +150,7 @@ class PlainTextSocketAppender(object):
self.open_connection()
return
except Exception as e:
- self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
+ self._display.vvvv(f"Unable to connect to Logentries: {e}")
root_delay *= 2
if root_delay > self.MAX_DELAY:
@@ -160,7 +159,7 @@ class PlainTextSocketAppender(object):
wait_for = root_delay + random.uniform(0, root_delay)
try:
- self._display.vvvv("sleeping %s before retry" % wait_for)
+ self._display.vvvv(f"sleeping {wait_for} before retry")
time.sleep(wait_for)
except KeyboardInterrupt:
raise
@@ -173,8 +172,8 @@ class PlainTextSocketAppender(object):
# Replace newlines with Unicode line separator
# for multi-line events
data = to_text(data, errors='surrogate_or_strict')
- multiline = data.replace(u'\n', self.LINE_SEP)
- multiline += u"\n"
+ multiline = data.replace('\n', self.LINE_SEP)
+ multiline += "\n"
# Send data, reconnect if needed
while True:
try:
@@ -247,7 +246,7 @@ class CallbackModule(CallbackBase):
self.use_tls = self.get_option('use_tls')
self.flatten = self.get_option('flatten')
except KeyError as e:
- self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
+ self._display.warning(f"Missing option for Logentries callback plugin: {e}")
self.disabled = True
try:
@@ -266,10 +265,10 @@ class CallbackModule(CallbackBase):
if not self.disabled:
if self.use_tls:
- self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
+ self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS")
self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
else:
- self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
+ self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}")
self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
self._appender.reopen_connection()
@@ -282,7 +281,7 @@ class CallbackModule(CallbackBase):
def emit(self, record):
msg = record.rstrip('\n')
- msg = "{0} {1}".format(self.token, msg)
+ msg = f"{self.token} {msg}"
self._appender.put(msg)
self._display.vvvv("Sent event to logentries")
diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py
index 088a84bf78..8b5acc6b9f 100644
--- a/plugins/callback/logstash.py
+++ b/plugins/callback/logstash.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Yevhen Khmelenko (@ujenmr)
@@ -13,7 +12,7 @@ name: logstash
type: notification
short_description: Sends events to Logstash
description:
- - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash).
+ - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash).
requirements:
- whitelisting in configuration
- logstash (Python library)
@@ -128,9 +127,7 @@ class CallbackModule(CallbackBase):
if not HAS_LOGSTASH:
self.disabled = True
- self._display.warning("The required python-logstash/python3-logstash is not installed. "
- "pip install python-logstash for Python 2"
- "pip install python3-logstash for Python 3")
+ self._display.warning("The required python3-logstash is not installed.")
self.start_time = now()
@@ -183,7 +180,7 @@ class CallbackModule(CallbackBase):
data['status'] = "OK"
data['ansible_playbook'] = playbook._file_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
)
@@ -208,7 +205,7 @@ class CallbackModule(CallbackBase):
data['ansible_playbook_duration'] = runtime.total_seconds()
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
)
@@ -227,7 +224,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("START PLAY | %s", self.play_name, extra=data)
else:
self.logger.info("ansible play", extra=data)
@@ -252,7 +249,7 @@ class CallbackModule(CallbackBase):
data['ansible_task'] = task_name
data['ansible_facts'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
)
@@ -273,7 +270,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"TASK OK | %s | RESULT | %s",
task_name, self._dump_results(result._result), extra=data
@@ -294,7 +291,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
else:
self.logger.info("ansible skipped", extra=data)
@@ -308,7 +305,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = imported_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("IMPORT | %s", imported_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -322,7 +319,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = missing_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -346,7 +343,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"TASK FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -369,7 +366,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -392,7 +389,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py
index a6b6f4c1ef..d05982cd61 100644
--- a/plugins/callback/mail.py
+++ b/plugins/callback/mail.py
@@ -4,15 +4,14 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: mail
type: notification
short_description: Sends failure events through email
description:
- - This callback will report failures through email.
+ - This callback reports failures through email.
author:
- Dag Wieers (@dagwieers)
requirements:
@@ -134,14 +133,14 @@ class CallbackModule(CallbackBase):
if self.bcc:
bcc_addresses = email.utils.getaddresses(self.bcc)
- content = 'Date: %s\n' % email.utils.formatdate()
- content += 'From: %s\n' % email.utils.formataddr(sender_address)
+ content = f'Date: {email.utils.formatdate()}\n'
+ content += f'From: {email.utils.formataddr(sender_address)}\n'
if self.to:
- content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses])
+ content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n"
if self.cc:
- content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses])
- content += 'Message-ID: %s\n' % email.utils.make_msgid(domain=self.get_option('message_id_domain'))
- content += 'Subject: %s\n\n' % subject.strip()
+ content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n"
+ content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n"
+ content += f'Subject: {subject.strip()}\n\n'
content += body
addresses = to_addresses
@@ -158,23 +157,22 @@ class CallbackModule(CallbackBase):
smtp.quit()
def subject_msg(self, multiline, failtype, linenr):
- return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+ msg = multiline.strip('\r\n').splitlines()[linenr]
+ return f'{failtype}: {msg}'
def indent(self, multiline, indent=8):
return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
def body_blob(self, multiline, texttype):
''' Turn some text output in a well-indented block for sending in a mail body '''
- intro = 'with the following %s:\n\n' % texttype
- blob = ''
- for line in multiline.strip('\r\n').splitlines():
- blob += '%s\n' % line
- return intro + self.indent(blob) + '\n'
+ intro = f'with the following {texttype}:\n\n'
+ blob = "\n".join(multiline.strip('\r\n').splitlines())
+ return f"{intro}{self.indent(blob)}\n"
def mail_result(self, result, failtype):
host = result._host.get_name()
if not self.sender:
- self.sender = '"Ansible: %s" ' % host
+ self.sender = f'"Ansible: {host}" '
# Add subject
if self.itembody:
@@ -190,31 +188,33 @@ class CallbackModule(CallbackBase):
elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
subject = self.subject_msg(result._result['exception'], failtype, -1)
else:
- subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+ subject = f'{failtype}: {result._task.name or result._task.action}'
# Make playbook name visible (e.g. in Outlook/Gmail condensed view)
- body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+ body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n'
if result._task.name:
- body += 'Task: %s\n' % result._task.name
- body += 'Module: %s\n' % result._task.action
- body += 'Host: %s\n' % host
+ body += f'Task: {result._task.name}\n'
+ body += f'Module: {result._task.action}\n'
+ body += f'Host: {host}\n'
body += '\n'
# Add task information (as much as possible)
body += 'The following task failed:\n\n'
if 'invocation' in result._result:
- body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+ body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n")
elif result._task.name:
- body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+ body += self.indent(f'{result._task.name} ({result._task.action})\n')
else:
- body += self.indent('%s\n' % result._task.action)
+ body += self.indent(f'{result._task.action}\n')
body += '\n'
# Add item / message
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
- body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+ fail_cond_list = '\n- '.join(result._task.failed_when)
+ fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}")
+ body += f"due to the following condition:\n\n{fail_cond}\n\n"
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
@@ -227,13 +227,13 @@ class CallbackModule(CallbackBase):
body += self.body_blob(result._result['exception'], 'exception')
if result._result.get('warnings'):
for i in range(len(result._result.get('warnings'))):
- body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))
+ body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}')
if result._result.get('deprecations'):
for i in range(len(result._result.get('deprecations'))):
- body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))
+ body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}')
body += 'and a complete dump of the error:\n\n'
- body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+ body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}')
self.mail(subject=subject, body=body)
@@ -256,4 +256,4 @@ class CallbackModule(CallbackBase):
def v2_runner_item_on_failed(self, result):
# Pass item information to task failure
self.itemsubject = result._result['msg']
- self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
+ self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'")
diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py
index 83b5fbf9dc..375876973a 100644
--- a/plugins/callback/nrdp.py
+++ b/plugins/callback/nrdp.py
@@ -4,8 +4,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: nrdp
@@ -132,10 +131,10 @@ class CallbackModule(CallbackBase):
xmldata = "\n"
xmldata += "\n"
xmldata += "\n"
- xmldata += "%s\n" % self.hostname
- xmldata += "%s\n" % self.servicename
- xmldata += "%d\n" % state
- xmldata += "\n" % msg
+ xmldata += f"{self.hostname}\n"
+ xmldata += f"{self.servicename}\n"
+ xmldata += f"{state}\n"
+ xmldata += f"\n"
xmldata += "\n"
xmldata += "\n"
@@ -152,7 +151,7 @@ class CallbackModule(CallbackBase):
validate_certs=self.validate_nrdp_certs)
return response.read()
except Exception as ex:
- self._display.warning("NRDP callback cannot send result {0}".format(ex))
+ self._display.warning(f"NRDP callback cannot send result {ex}")
def v2_playbook_on_play_start(self, play):
'''
@@ -170,17 +169,16 @@ class CallbackModule(CallbackBase):
critical = warning = 0
for host in hosts:
stat = stats.summarize(host)
- gstats += "'%s_ok'=%d '%s_changed'=%d \
- '%s_unreachable'=%d '%s_failed'=%d " % \
- (host, stat['ok'], host, stat['changed'],
- host, stat['unreachable'], host, stat['failures'])
+ gstats += (
+ f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} "
+ )
# Critical when failed tasks or unreachable host
critical += stat['failures']
critical += stat['unreachable']
# Warning when changed tasks
warning += stat['changed']
- msg = "%s | %s" % (name, gstats)
+ msg = f"{name} | {gstats}"
if critical:
# Send Critical
self._send_nrdp(self.CRITICAL, msg)
diff --git a/plugins/callback/null.py b/plugins/callback/null.py
index 0cc722f63b..0527c1c467 100644
--- a/plugins/callback/null.py
+++ b/plugins/callback/null.py
@@ -4,8 +4,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -13,7 +12,7 @@ name: 'null'
type: stdout
requirements:
- set as main display callback
-short_description: do not display stuff to screen
+short_description: Do not display stuff to screen
description:
- This callback prevents outputting events to screen.
"""
diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py
index 8cb77f3cf8..80f24924b9 100644
--- a/plugins/callback/opentelemetry.py
+++ b/plugins/callback/opentelemetry.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Victor Martinez (@v1v)
@@ -36,8 +35,8 @@ options:
- Whether to enable this callback only if the given environment variable exists and it is set to V(true).
- This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when
running Ansible locally.
- - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to true this
- plugin will be enabled.
+ - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this
+ plugin is enabled.
env:
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
ini:
@@ -137,14 +136,14 @@ import getpass
import json
import os
import socket
-import sys
-import time
import uuid
+from time import time_ns
from collections import OrderedDict
from os.path import basename
from ansible.errors import AnsibleError
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six import raise_from
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.plugins.callback import CallbackBase
@@ -165,31 +164,12 @@ try:
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter
)
- # Support for opentelemetry-api <= 1.12
- try:
- from opentelemetry.util._time import _time_ns
- except ImportError as imp_exc:
- OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
- else:
- OTEL_LIBRARY_TIME_NS_ERROR = None
-
except ImportError as imp_exc:
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
- OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
else:
OTEL_LIBRARY_IMPORT_ERROR = None
-if sys.version_info >= (3, 7):
- time_ns = time.time_ns
-elif not OTEL_LIBRARY_TIME_NS_ERROR:
- time_ns = _time_ns
-else:
- def time_ns():
- # Support versions older than 3.7 with opentelemetry-api > 1.12
- return int(time.time() * 1e9)
-
-
class TaskData:
"""
Data about an individual task.
@@ -210,7 +190,7 @@ class TaskData:
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
- host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
return
@@ -233,7 +213,6 @@ class HostData:
class OpenTelemetrySource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -281,9 +260,6 @@ class OpenTelemetrySource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
task.dump = dump
task.add_host(HostData(host_uuid, host_name, status, result))
@@ -331,8 +307,7 @@ class OpenTelemetrySource(object):
start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
parent.set_status(status)
# Populate trace metadata attributes
- if self.ansible_version is not None:
- parent.set_attribute("ansible.version", self.ansible_version)
+ parent.set_attribute("ansible.version", ansible_version)
parent.set_attribute("ansible.session", self.session)
parent.set_attribute("ansible.host.name", self.host)
if self.ip_address is not None:
@@ -348,7 +323,7 @@ class OpenTelemetrySource(object):
def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs):
""" update the span with the given TaskData and HostData """
- name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ name = f'[{host_data.name}] {task_data.play}: {task_data.name}'
message = 'success'
res = {}
@@ -471,7 +446,7 @@ class OpenTelemetrySource(object):
def get_error_message_from_results(results, action):
for result in results:
if result.get('failed', False):
- return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
+ return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}"
@staticmethod
def _last_line(text):
@@ -483,14 +458,14 @@ class OpenTelemetrySource(object):
message = result.get('msg', 'failed')
exception = result.get('exception')
stderr = result.get('stderr')
- return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+ return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\""
@staticmethod
def enrich_error_message_from_results(results, action):
message = ""
for result in results:
if result.get('failed', False):
- message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
+ message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}"
return message
@@ -536,8 +511,9 @@ class CallbackModule(CallbackBase):
environment_variable = self.get_option('enable_from_environment')
if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
self.disabled = True
- self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
- "Disabling the `opentelemetry` callback plugin.".format(environment_variable))
+ self._display.warning(
+ f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin."
+ )
self.hide_task_arguments = self.get_option('hide_task_arguments')
diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py
new file mode 100644
index 0000000000..809baddb95
--- /dev/null
+++ b/plugins/callback/print_task.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Max Mitschke
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: print_task
+type: aggregate
+short_description: Prints playbook task snippet to job output
+description:
+ - This plugin prints the currently executing playbook task to the job output.
+version_added: 10.7.0
+requirements:
+ - enable in configuration
+"""
+
+EXAMPLES = r"""
+ansible.cfg: |-
+ # Enable plugin
+ [defaults]
+ callbacks_enabled=community.general.print_task
+"""
+
+from yaml import load, dump
+
+try:
+ from yaml import CSafeDumper as SafeDumper
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeDumper, SafeLoader
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback module prints the YAML snippet of the currently executing task to the job output.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.print_task'
+
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ self._printed_message = False
+
+ def _print_task(self, task):
+ if hasattr(task, '_ds'):
+ task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader)
+ task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper)
+ self._display.display(f"\n{task_yaml}\n")
+ self._printed_message = True
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._printed_message = False
+
+ def v2_runner_on_start(self, host, task):
+ if not self._printed_message:
+ self._print_task(task)
diff --git a/plugins/callback/say.py b/plugins/callback/say.py
index b1ab31f98a..8a4e93f353 100644
--- a/plugins/callback/say.py
+++ b/plugins/callback/say.py
@@ -5,8 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -15,9 +14,9 @@ type: notification
requirements:
- whitelisting in configuration
- the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program
-short_description: notify using software speech synthesizer
+short_description: Notify using software speech synthesizer
description:
- - This plugin will use the C(say) or C(espeak) program to "speak" about play events.
+ - This plugin uses C(say) or C(espeak) to "speak" about play events.
"""
import platform
@@ -50,7 +49,7 @@ class CallbackModule(CallbackBase):
self.synthesizer = get_bin_path('say')
if platform.system() != 'Darwin':
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
- self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter")
else:
self.FAILED_VOICE = 'Zarvox'
self.REGULAR_VOICE = 'Trinoids'
@@ -69,7 +68,7 @@ class CallbackModule(CallbackBase):
# ansible will not call any callback if disabled is set to True
if not self.synthesizer:
self.disabled = True
- self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+ self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled")
def say(self, msg, voice):
cmd = [self.synthesizer, msg]
@@ -78,7 +77,7 @@ class CallbackModule(CallbackBase):
subprocess.call(cmd)
def runner_on_failed(self, host, res, ignore_errors=False):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def runner_on_ok(self, host, res):
self.say("pew", self.LASER_VOICE)
@@ -87,13 +86,13 @@ class CallbackModule(CallbackBase):
self.say("pew", self.LASER_VOICE)
def runner_on_unreachable(self, host, res):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def runner_on_async_ok(self, host, res, jid):
self.say("pew", self.LASER_VOICE)
def runner_on_async_failed(self, host, res, jid):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def playbook_on_start(self):
self.say("Running Playbook", self.REGULAR_VOICE)
@@ -103,15 +102,15 @@ class CallbackModule(CallbackBase):
def playbook_on_task_start(self, name, is_conditional):
if not is_conditional:
- self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ self.say(f"Starting task: {name}", self.REGULAR_VOICE)
else:
- self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+ self.say(f"Notifying task: {name}", self.REGULAR_VOICE)
def playbook_on_setup(self):
self.say("Gathering facts", self.REGULAR_VOICE)
def playbook_on_play_start(self, name):
- self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+ self.say(f"Starting play: {name}", self.HAPPY_VOICE)
def playbook_on_stats(self, stats):
self.say("Play complete", self.HAPPY_VOICE)
diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py
index 7915f1e8f3..53d40671bc 100644
--- a/plugins/callback/selective.py
+++ b/plugins/callback/selective.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -13,7 +12,7 @@ name: selective
type: stdout
requirements:
- set as main display callback
-short_description: only print certain tasks
+short_description: Only print certain tasks
description:
- This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators
to focus on the tasks that provide value only.
@@ -48,13 +47,13 @@ from ansible.module_utils.common.text.converters import to_text
DONT_COLORIZE = False
COLORS = {
'normal': '\033[0m',
- 'ok': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_OK]),
+ 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m',
'bold': '\033[1m',
'not_so_bold': '\033[1m\033[34m',
- 'changed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_CHANGED]),
- 'failed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_ERROR]),
+ 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m',
+ 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m',
'endc': '\033[0m',
- 'skipped': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_SKIP]),
+ 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m',
}
@@ -73,7 +72,7 @@ def colorize(msg, color):
if DONT_COLORIZE:
return msg
else:
- return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
+ return f"{COLORS[color]}{msg}{COLORS['endc']}"
class CallbackModule(CallbackBase):
@@ -106,15 +105,15 @@ class CallbackModule(CallbackBase):
line_length = 120
if self.last_skipped:
print()
- line = "# {0} ".format(task_name)
- msg = colorize("{0}{1}".format(line, '*' * (line_length - len(line))), 'bold')
+ line = f"# {task_name} "
+ msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold')
print(msg)
def _indent_text(self, text, indent_level):
lines = text.splitlines()
result_lines = []
for l in lines:
- result_lines.append("{0}{1}".format(' ' * indent_level, l))
+ result_lines.append(f"{' ' * indent_level}{l}")
return '\n'.join(result_lines)
def _print_diff(self, diff, indent_level):
@@ -147,19 +146,19 @@ class CallbackModule(CallbackBase):
change_string = colorize('FAILED!!!', color)
else:
color = 'changed' if changed else 'ok'
- change_string = colorize("changed={0}".format(changed), color)
+ change_string = colorize(f"changed={changed}", color)
msg = colorize(msg, color)
line_length = 120
spaces = ' ' * (40 - len(name) - indent_level)
- line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
+ line = f"{' ' * indent_level} * {name}{spaces}- {change_string}"
if len(msg) < 50:
- line += ' -- {0}'.format(msg)
- print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ line += f' -- {msg}'
+ print(f"{line} {'-' * (line_length - len(line))}---------")
else:
- print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(f"{line} {'-' * (line_length - len(line))}")
print(self._indent_text(msg, indent_level + 4))
if diff:
@@ -239,8 +238,10 @@ class CallbackModule(CallbackBase):
else:
color = 'ok'
- msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
- host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
+ msg = (
+ f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable="
+ f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}"
+ )
print(colorize(msg, color))
def v2_runner_on_skipped(self, result, **kwargs):
@@ -252,17 +253,15 @@ class CallbackModule(CallbackBase):
line_length = 120
spaces = ' ' * (31 - len(result._host.name) - 4)
- line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
- spaces,
- colorize("skipped", 'skipped'),)
+ line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}"
reason = result._result.get('skipped_reason', '') or \
result._result.get('skip_reason', '')
if len(reason) < 50:
- line += ' -- {0}'.format(reason)
- print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ line += f' -- {reason}'
+ print(f"{line} {'-' * (line_length - len(line))}---------")
else:
- print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(f"{line} {'-' * (line_length - len(line))}")
print(self._indent_text(reason, 8))
print(reason)
diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py
index 5a99797039..8bb081a541 100644
--- a/plugins/callback/slack.py
+++ b/plugins/callback/slack.py
@@ -5,8 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -19,6 +18,11 @@ short_description: Sends play events to a Slack channel
description:
- This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
options:
+ http_agent:
+ description:
+ - HTTP user agent to use for requests to Slack.
+ type: string
+ version_added: "10.5.0"
webhook_url:
required: true
description: Slack Webhook URL.
@@ -62,7 +66,6 @@ import os
import uuid
from ansible import context
-from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
@@ -108,7 +111,7 @@ class CallbackModule(CallbackBase):
self.username = self.get_option('username')
self.show_invocation = (self._display.verbosity > 1)
self.validate_certs = self.get_option('validate_certs')
-
+ self.http_agent = self.get_option('http_agent')
if self.webhook_url is None:
self.disabled = True
self._display.warning('Slack Webhook URL was not provided. The '
@@ -134,18 +137,22 @@ class CallbackModule(CallbackBase):
self._display.debug(data)
self._display.debug(self.webhook_url)
try:
- response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
- headers=headers)
+ response = open_url(
+ self.webhook_url,
+ data=data,
+ validate_certs=self.validate_certs,
+ headers=headers,
+ http_agent=self.http_agent,
+ )
return response.read()
except Exception as e:
- self._display.warning(u'Could not submit message to Slack: %s' %
- to_text(e))
+ self._display.warning(f'Could not submit message to Slack: {e}')
def v2_playbook_on_start(self, playbook):
self.playbook_name = os.path.basename(playbook._file_name)
title = [
- '*Playbook initiated* (_%s_)' % self.guid
+ f'*Playbook initiated* (_{self.guid}_)'
]
invocation_items = []
@@ -156,23 +163,23 @@ class CallbackModule(CallbackBase):
subset = context.CLIARGS['subset']
inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
- invocation_items.append('Inventory: %s' % ', '.join(inventory))
+ invocation_items.append(f"Inventory: {', '.join(inventory)}")
if tags and tags != ['all']:
- invocation_items.append('Tags: %s' % ', '.join(tags))
+ invocation_items.append(f"Tags: {', '.join(tags)}")
if skip_tags:
- invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
+ invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}")
if subset:
- invocation_items.append('Limit: %s' % subset)
+ invocation_items.append(f'Limit: {subset}')
if extra_vars:
- invocation_items.append('Extra Vars: %s' %
- ' '.join(extra_vars))
+ invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}")
- title.append('by *%s*' % context.CLIARGS['remote_user'])
+ title.append(f"by *{context.CLIARGS['remote_user']}*")
- title.append('\n\n*%s*' % self.playbook_name)
+ title.append(f'\n\n*{self.playbook_name}*')
msg_items = [' '.join(title)]
if invocation_items:
- msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
+ _inv_item = '\n'.join(invocation_items)
+ msg_items.append(f'```\n{_inv_item}\n```')
msg = '\n'.join(msg_items)
@@ -192,8 +199,8 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_play_start(self, play):
"""Display Play start messages"""
- name = play.name or 'Play name not specified (%s)' % play._uuid
- msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
+ name = play.name or f'Play name not specified ({play._uuid})'
+ msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*'
attachments = [
{
'fallback': msg,
@@ -228,7 +235,7 @@ class CallbackModule(CallbackBase):
attachments = []
msg_items = [
- '*Playbook Complete* (_%s_)' % self.guid
+ f'*Playbook Complete* (_{self.guid}_)'
]
if failures or unreachable:
color = 'danger'
@@ -237,7 +244,7 @@ class CallbackModule(CallbackBase):
color = 'good'
msg_items.append('\n*Success!*')
- msg_items.append('```\n%s\n```' % t)
+ msg_items.append(f'```\n{t}\n```')
msg = '\n'.join(msg_items)
diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py
index 41b3b0b443..c385050d67 100644
--- a/plugins/callback/splunk.py
+++ b/plugins/callback/splunk.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: splunk
@@ -12,7 +11,7 @@ type: notification
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst (!UNKNOWN) "
description:
- - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
+ - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/).
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
requirements:
@@ -92,6 +91,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -105,7 +105,6 @@ class SplunkHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -115,10 +114,6 @@ class SplunkHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -144,7 +139,7 @@ class SplunkHTTPCollectorSource(object):
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
@@ -153,15 +148,14 @@ class SplunkHTTPCollectorSource(object):
data['ansible_result'] = result._result
# This wraps the json payload in and outer json event needed by Splunk
- jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
- jsondata = '{"event":' + jsondata + "}"
+ jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True)
open_url(
url,
jsondata,
headers={
'Content-type': 'application/json',
- 'Authorization': 'Splunk ' + authtoken
+ 'Authorization': f"Splunk {authtoken}"
},
method='POST',
validate_certs=validate_certs
diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py
index 108f324b29..7a762c30e8 100644
--- a/plugins/callback/sumologic.py
+++ b/plugins/callback/sumologic.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: sumologic
@@ -12,7 +11,7 @@ type: notification
short_description: Sends task result events to Sumologic
author: "Ryan Currah (@ryancurrah)"
description:
- - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source.
+ - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source.
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and
@@ -49,6 +48,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -62,7 +62,6 @@ class SumologicHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -72,10 +71,6 @@ class SumologicHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -93,7 +88,7 @@ class SumologicHTTPCollectorSource(object):
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py
index d1797455ac..cab3973be1 100644
--- a/plugins/callback/syslog_json.py
+++ b/plugins/callback/syslog_json.py
@@ -4,8 +4,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
@@ -13,12 +12,12 @@ name: syslog_json
type: notification
requirements:
- whitelist in configuration
-short_description: sends JSON events to syslog
+short_description: Sends JSON events to syslog
description:
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
options:
server:
- description: Syslog server that will receive the event.
+ description: Syslog server that receives the event.
type: str
env:
- name: SYSLOG_SERVER
diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py
new file mode 100644
index 0000000000..f64c4c57db
--- /dev/null
+++ b/plugins/callback/tasks_only.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Felix Fontein (@felixfontein)
+name: tasks_only
+type: stdout
+version_added: 11.1.0
+short_description: Only show tasks
+description:
+ - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output.
+ - Can be used to generate output for documentation examples.
+ For this, the O(number_of_columns) option should be set to an explicit value.
+extends_documentation_fragment:
+ - default_callback
+options:
+ number_of_columns:
+ description:
+ - Sets the number of columns for Ansible's display.
+ type: int
+ env:
+ - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS
+"""
+
+EXAMPLES = r"""
+---
+# Enable callback in ansible.cfg:
+ansible_config: |-
+ [defaults]
+ stdout_callback = community.general.tasks_only
+
+---
+# Enable callback with environment variables:
+environment_variable: |-
+ ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only
+"""
+
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.tasks_only'
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ pass
+
+ def set_options(self, *args, **kwargs):
+ result = super(CallbackModule, self).set_options(*args, **kwargs)
+ self.number_of_columns = self.get_option("number_of_columns")
+ if self.number_of_columns is not None:
+ self._display.columns = self.number_of_columns
+ return result
diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py
index 262db47dc9..a43ddcbef9 100644
--- a/plugins/callback/timestamp.py
+++ b/plugins/callback/timestamp.py
@@ -5,9 +5,8 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
+from __future__ import annotations
-__metaclass__ = type
DOCUMENTATION = r"""
name: timestamp
@@ -84,7 +83,7 @@ def banner(self, msg, color=None, cows=True):
msg = to_text(msg)
if self.b_cowsay and cows:
try:
- self.banner_cowsay("%s @ %s" % (msg, timestamp))
+ self.banner_cowsay(f"{msg} @ {timestamp}")
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
@@ -97,7 +96,7 @@ def banner(self, msg, color=None, cows=True):
if star_len <= 3:
star_len = 3
stars = "*" * star_len
- self.display("\n%s %s %s" % (msg, stars, timestamp), color=color)
+ self.display(f"\n{msg} {stars} {timestamp}", color=color)
class CallbackModule(Default):
diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py
index e251a5f080..8fd8c10c94 100644
--- a/plugins/callback/unixy.py
+++ b/plugins/callback/unixy.py
@@ -5,14 +5,13 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
name: unixy
type: stdout
author: Al Bowles (@akatch)
-short_description: condensed Ansible output
+short_description: Condensed Ansible output
description:
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.
extends_documentation_fragment:
@@ -67,24 +66,24 @@ class CallbackModule(CallbackModule_default):
def _process_result_output(self, result, msg):
task_host = result._host.get_name()
- task_result = "%s %s" % (task_host, msg)
+ task_result = f"{task_host} {msg}"
if self._run_is_verbose(result):
- task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4))
+ task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}"
return task_result
if self.delegated_vars:
task_delegate_host = self.delegated_vars['ansible_host']
- task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg)
+ task_result = f"{task_host} -> {task_delegate_host} {msg}"
if result._result.get('msg') and result._result.get('msg') != "All items completed":
- task_result += " | msg: " + to_text(result._result.get('msg'))
+ task_result += f" | msg: {to_text(result._result.get('msg'))}"
if result._result.get('stdout'):
- task_result += " | stdout: " + result._result.get('stdout')
+ task_result += f" | stdout: {result._result.get('stdout')}"
if result._result.get('stderr'):
- task_result += " | stderr: " + result._result.get('stderr')
+ task_result += f" | stderr: {result._result.get('stderr')}"
return task_result
@@ -92,30 +91,30 @@ class CallbackModule(CallbackModule_default):
self._get_task_display_name(task)
if self.task_display_name is not None:
if task.check_mode and self.get_option('check_mode_markers'):
- self._display.display("%s (check mode)..." % self.task_display_name)
+ self._display.display(f"{self.task_display_name} (check mode)...")
else:
- self._display.display("%s..." % self.task_display_name)
+ self._display.display(f"{self.task_display_name}...")
def v2_playbook_on_handler_task_start(self, task):
self._get_task_display_name(task)
if self.task_display_name is not None:
if task.check_mode and self.get_option('check_mode_markers'):
- self._display.display("%s (via handler in check mode)... " % self.task_display_name)
+ self._display.display(f"{self.task_display_name} (via handler in check mode)... ")
else:
- self._display.display("%s (via handler)... " % self.task_display_name)
+ self._display.display(f"{self.task_display_name} (via handler)... ")
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if play.check_mode and self.get_option('check_mode_markers'):
if name and play.hosts:
- msg = u"\n- %s (in check mode) on hosts: %s -" % (name, ",".join(play.hosts))
+ msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -"
else:
- msg = u"- check mode -"
+ msg = "- check mode -"
else:
if name and play.hosts:
- msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ msg = f"\n- {name} on hosts: {','.join(play.hosts)} -"
else:
- msg = u"---"
+ msg = "---"
self._display.display(msg)
@@ -126,7 +125,7 @@ class CallbackModule(CallbackModule_default):
msg = "skipped"
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
+ self._display.display(f" {task_result}", display_color)
else:
return
@@ -136,10 +135,10 @@ class CallbackModule(CallbackModule_default):
msg = "failed"
item_value = self._get_item_label(result._result)
if item_value:
- msg += " | item: %s" % (item_value,)
+ msg += f" | item: {item_value}"
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
+ self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr'))
def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
self._preprocess_result(result)
@@ -149,13 +148,13 @@ class CallbackModule(CallbackModule_default):
msg = "done"
item_value = self._get_item_label(result._result)
if item_value:
- msg += " | item: %s" % (item_value,)
+ msg += f" | item: {item_value}"
display_color = C.COLOR_CHANGED
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
+ self._display.display(f" {task_result}", display_color)
elif self.get_option('display_ok_hosts'):
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
+ self._display.display(f" {task_result}", display_color)
def v2_runner_item_on_skipped(self, result):
self.v2_runner_on_skipped(result)
@@ -173,7 +172,7 @@ class CallbackModule(CallbackModule_default):
display_color = C.COLOR_UNREACHABLE
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
+ self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr'))
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
@@ -195,25 +194,17 @@ class CallbackModule(CallbackModule_default):
# TODO how else can we display these?
t = stats.summarize(h)
- self._display.display(u" %s : %s %s %s %s %s %s" % (
- hostcolor(h, t),
- colorize(u'ok', t['ok'], C.COLOR_OK),
- colorize(u'changed', t['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', t['failures'], C.COLOR_ERROR),
- colorize(u'rescued', t['rescued'], C.COLOR_OK),
- colorize(u'ignored', t['ignored'], C.COLOR_WARN)),
+ self._display.display(
+ f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}",
screen_only=True
)
- self._display.display(u" %s : %s %s %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize(u'ok', t['ok'], None),
- colorize(u'changed', t['changed'], None),
- colorize(u'unreachable', t['unreachable'], None),
- colorize(u'failed', t['failures'], None),
- colorize(u'rescued', t['rescued'], None),
- colorize(u'ignored', t['ignored'], None)),
+ self._display.display(
+ f" {hostcolor(h, t, False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} "
+ f"{colorize('unreachable', t['unreachable'], None)} {colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} "
+ f"{colorize('ignored', t['ignored'], None)}",
log_only=True
)
if stats.custom and self.get_option('show_custom_stats'):
@@ -223,12 +214,14 @@ class CallbackModule(CallbackModule_default):
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
- self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+ stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '')
+ self._display.display(f'\t{k}: {stat_val}')
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
- self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')
+ self._display.display(f'\tRUN: {stat_val_run}')
self._display.display("", screen_only=True)
def v2_playbook_on_no_hosts_matched(self):
@@ -239,23 +232,23 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_start(self, playbook):
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
- self._display.display("Executing playbook %s in check mode" % basename(playbook._file_name))
+ self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode")
else:
- self._display.display("Executing playbook %s" % basename(playbook._file_name))
+ self._display.display(f"Executing playbook {basename(playbook._file_name)}")
# show CLI arguments
if self._display.verbosity > 3:
if context.CLIARGS.get('args'):
- self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}",
color=C.COLOR_VERBOSE, screen_only=True)
for argument in (a for a in context.CLIARGS if a != 'args'):
val = context.CLIARGS[argument]
if val:
- self._display.vvvv('%s: %s' % (argument, val))
+ self._display.vvvv(f'{argument}: {val}')
def v2_runner_retry(self, result):
- msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries'])
+ msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})"
if self._run_is_verbose(result):
- msg += "Result was: %s" % self._dump_results(result._result)
+ msg += f"Result was: {self._dump_results(result._result)}"
self._display.display(msg, color=C.COLOR_DEBUG)
diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py
index 89b6b90dcc..f02840c9c6 100644
--- a/plugins/callback/yaml.py
+++ b/plugins/callback/yaml.py
@@ -4,14 +4,18 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: yaml
type: stdout
short_description: YAML-ized Ansible screen output
+deprecated:
+ removed_in: 12.0.0
+ why: Starting in ansible-core 2.13, the P(ansible.builtin.default#callback) callback has support for printing output in
+ YAML format.
+ alternative: Use O(ansible.builtin.default#callback:result_format=yaml).
description:
- Ansible output that can be quite a bit easier to read than the default JSON formatting.
extends_documentation_fragment:
@@ -43,7 +47,7 @@ from ansible.plugins.callback.default import CallbackModule as Default
# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
"""Returns true if string should be in block format"""
- for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
+ for c in "\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if c in value:
return True
return False
@@ -115,6 +119,9 @@ except ImportError:
def transform_recursively(value, transform):
+ # Since 2.19.0b7, this should no longer be needed:
+ # https://github.com/ansible/ansible/issues/85325
+ # https://github.com/ansible/ansible/pull/85389
if isinstance(value, Mapping):
return {transform(k): transform(v) for k, v in value.items()}
if isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
@@ -159,11 +166,11 @@ class CallbackModule(Default):
# put changed and skipped into a header line
if 'changed' in abridged_result:
- dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
+ dumped += f"changed={str(abridged_result['changed']).lower()} "
del abridged_result['changed']
if 'skipped' in abridged_result:
- dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
+ dumped += f"skipped={str(abridged_result['skipped']).lower()} "
del abridged_result['skipped']
# if we already have stdout, we don't need stdout_lines
diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py
index 9e0a0e73a9..842c3f05d3 100644
--- a/plugins/connection/chroot.py
+++ b/plugins/connection/chroot.py
@@ -7,8 +7,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Maykel Moya (!UNKNOWN)
@@ -81,7 +80,7 @@ from ansible.errors import AnsibleError
from ansible.module_utils.basic import is_executable
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.six.moves import shlex_quote
-from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display
@@ -107,15 +106,15 @@ class Connection(ConnectionBase):
# do some trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
- raise AnsibleError("%s is not a directory" % self.chroot)
+ raise AnsibleError(f"{self.chroot} is not a directory")
chrootsh = os.path.join(self.chroot, 'bin/sh')
# Want to check for a usable bourne shell inside the chroot.
# is_executable() == True is sufficient. For symlinks it
# gets really complicated really fast. So we punt on finding that
- # out. As long as it's a symlink we assume that it will work
+ # out. As long as it is a symlink we assume that it will work
if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
- raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+ raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)")
def _connect(self):
""" connect to the chroot """
@@ -130,7 +129,7 @@ class Connection(ConnectionBase):
try:
self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
except ValueError as e:
- raise AnsibleError(to_native(e))
+ raise AnsibleError(str(e))
super(Connection, self)._connect()
if not self._connected:
@@ -148,7 +147,7 @@ class Connection(ConnectionBase):
executable = self.get_option('executable')
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
- display.vvv("EXEC %s" % local_cmd, host=self.chroot)
+ display.vvv(f"EXEC {local_cmd}", host=self.chroot)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -173,7 +172,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -182,7 +181,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to chroot """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -192,27 +191,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from chroot to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
@@ -224,10 +223,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py
index 86d085fc09..ad01326aff 100644
--- a/plugins/connection/funcd.py
+++ b/plugins/connection/funcd.py
@@ -6,8 +6,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Michael Scherer (@mscherer)
@@ -72,7 +71,7 @@ class Connection(ConnectionBase):
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
# totally ignores privilege escalation
- display.vvv("EXEC %s" % cmd, host=self.host)
+ display.vvv(f"EXEC {cmd}", host=self.host)
p = self.client.command.run(cmd)[self.host]
return p[0], p[1], p[2]
@@ -87,14 +86,14 @@ class Connection(ConnectionBase):
""" transfer a file from local to remote """
out_path = self._normalize_path(out_path, '/')
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.host)
self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
in_path = self._normalize_path(in_path, '/')
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host)
# need to use a tmp dir due to difference of semantic for getfile
# ( who take a # directory as destination) and fetch_file, who
# take a file directly
diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py
index b9226284c2..4f73d05532 100644
--- a/plugins/connection/incus.py
+++ b/plugins/connection/incus.py
@@ -5,8 +5,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Stéphane Graber (@stgraber)
@@ -33,6 +32,15 @@ options:
vars:
- name: ansible_executable
- name: ansible_incus_executable
+ incus_become_method:
+ description:
+ - Become command used to switch to a non-root user.
+ - Is only used when O(remote_user) is not V(root).
+ type: str
+ default: /bin/su
+ vars:
+ - name: incus_become_method
+ version_added: 10.4.0
remote:
description:
- The name of the Incus remote to use (per C(incus remote list)).
@@ -41,6 +49,22 @@ options:
default: local
vars:
- name: ansible_incus_remote
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ default: root
+ vars:
+ - name: ansible_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ keyword:
+ - name: remote_user
+ version_added: 10.4.0
project:
description:
- The name of the Incus project to use (per C(incus project list)).
@@ -65,7 +89,6 @@ class Connection(ConnectionBase):
transport = "incus"
has_pipelining = True
- default_user = 'root'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -80,10 +103,34 @@ class Connection(ConnectionBase):
super(Connection, self)._connect()
if not self._connected:
- self._display.vvv(u"ESTABLISH Incus CONNECTION FOR USER: root",
+ self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}",
host=self._instance())
self._connected = True
+    def _build_command(self, cmd) -> list[str]:
+ """build the command to execute on the incus host"""
+
+ exec_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "exec",
+ f"{self.get_option('remote')}:{self._instance()}",
+ "--"]
+
+ if self.get_option("remote_user") != "root":
+ self._display.vvv(
+ f"INFO: Running as non-root user: {self.get_option('remote_user')}, \
+ trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}",
+ host=self._instance(),
+ )
+ exec_cmd.extend(
+ [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"]
+ )
+
+ exec_cmd.extend([self.get_option("executable"), "-c", cmd])
+
+ return exec_cmd
+
def _instance(self):
# Return only the leading part of the FQDN as the instance name
# as Incus instance names cannot be a FQDN.
@@ -93,16 +140,11 @@ class Connection(ConnectionBase):
""" execute a command on the Incus host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- self._display.vvv(u"EXEC {0}".format(cmd),
+ self._display.vvv(f"EXEC {cmd}",
host=self._instance())
- local_cmd = [
- self._incus_cmd,
- "--project", self.get_option("project"),
- "exec",
- "%s:%s" % (self.get_option("remote"), self._instance()),
- "--",
- self._play_context.executable, "-c", cmd]
+ local_cmd = self._build_command(cmd)
+ self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -113,34 +155,96 @@ class Connection(ConnectionBase):
stdout = to_text(stdout)
stderr = to_text(stderr)
- if stderr == "Error: Instance is not running.\n":
- raise AnsibleConnectionFailure("instance not running: %s" %
- self._instance())
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance is not running"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
- if stderr == "Error: Instance not found\n":
- raise AnsibleConnectionFailure("instance not found: %s" %
- self._instance())
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance not found"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have permission " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have entitlement " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
return process.returncode, stdout, stderr
+ def _get_remote_uid_gid(self) -> tuple[int, int]:
+ """Get the user and group ID of 'remote_user' from the instance."""
+
+ rc, uid_out, err = self.exec_command("/bin/id -u")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}"
+ )
+ uid = uid_out.strip()
+
+ rc, gid_out, err = self.exec_command("/bin/id -g")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}"
+ )
+ gid = gid_out.strip()
+
+ return int(uid), int(gid)
+
def put_file(self, in_path, out_path):
""" put a file from local to Incus """
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path),
+ self._display.vvv(f"PUT {in_path} TO {out_path}",
host=self._instance())
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+ raise AnsibleFileNotFound(f"input path is not a file: {in_path}")
- local_cmd = [
- self._incus_cmd,
- "--project", self.get_option("project"),
- "file", "push", "--quiet",
- in_path,
- "%s:%s/%s" % (self.get_option("remote"),
- self._instance(),
- out_path)]
+ if self.get_option("remote_user") != "root":
+ uid, gid = self._get_remote_uid_gid()
+ local_cmd = [
+ self._incus_cmd,
+ "--project",
+ self.get_option("project"),
+ "file",
+ "push",
+ "--uid",
+ str(uid),
+ "--gid",
+ str(gid),
+ "--quiet",
+ in_path,
+ f"{self.get_option('remote')}:{self._instance()}/{out_path}",
+ ]
+ else:
+ local_cmd = [
+ self._incus_cmd,
+ "--project",
+ self.get_option("project"),
+ "file",
+ "push",
+ "--quiet",
+ in_path,
+ f"{self.get_option('remote')}:{self._instance()}/{out_path}",
+ ]
+
+ self._display.vvvvv(f"PUT {local_cmd}", host=self._instance())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
@@ -150,16 +254,14 @@ class Connection(ConnectionBase):
""" fetch a file from Incus to local """
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path),
+ self._display.vvv(f"FETCH {in_path} TO {out_path}",
host=self._instance())
local_cmd = [
self._incus_cmd,
"--project", self.get_option("project"),
"file", "pull", "--quiet",
- "%s:%s/%s" % (self.get_option("remote"),
- self._instance(),
- in_path),
+ f"{self.get_option('remote')}:{self._instance()}/{in_path}",
out_path]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py
index 8c39f07cd3..35d5ab0658 100644
--- a/plugins/connection/iocage.py
+++ b/plugins/connection/iocage.py
@@ -7,8 +7,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Stephan Lohse (!UNKNOWN)
@@ -55,11 +54,12 @@ class Connection(Jail):
jail_uuid = self.get_jail_uuid()
- kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid)
+ kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}'
- display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
- iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
- host=kwargs[Jail.modified_jailname_key])
+ display.vvv(
+ f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}",
+ host=kwargs[Jail.modified_jailname_key]
+ )
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -81,6 +81,6 @@ class Connection(Jail):
p.wait()
if p.returncode != 0:
- raise AnsibleError(u"iocage returned an error: {0}".format(stdout))
+ raise AnsibleError(f"iocage returned an error: {stdout}")
return stdout.strip('\n')
diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py
index a5a21ff635..6f06c96774 100644
--- a/plugins/connection/jail.py
+++ b/plugins/connection/jail.py
@@ -7,8 +7,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Ansible Core Team
@@ -75,14 +74,14 @@ class Connection(ConnectionBase):
self.jexec_cmd = self._search_executable('jexec')
if self.jail not in self.list_jails():
- raise AnsibleError("incorrect jail name %s" % self.jail)
+ raise AnsibleError(f"incorrect jail name {self.jail}")
@staticmethod
def _search_executable(executable):
try:
return get_bin_path(executable)
except ValueError:
- raise AnsibleError("%s command not found in PATH" % executable)
+ raise AnsibleError(f"{executable} command not found in PATH")
def list_jails(self):
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
@@ -97,7 +96,7 @@ class Connection(ConnectionBase):
""" connect to the jail; nothing to do here """
super(Connection, self)._connect()
if not self._connected:
- display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+ display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: {self._play_context.remote_user}", host=self.jail)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
@@ -115,11 +114,11 @@ class Connection(ConnectionBase):
if self._play_context.remote_user is not None:
local_cmd += ['-U', self._play_context.remote_user]
# update HOME since -U does not update the jail environment
- set_env = 'HOME=~' + self._play_context.remote_user + ' '
+ set_env = f"HOME=~{self._play_context.remote_user} "
local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
- display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ display.vvv(f"EXEC {local_cmd}", host=self.jail)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -144,7 +143,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -153,7 +152,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to jail """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -163,27 +162,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from jail to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
@@ -195,10 +194,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py
index 7f7f3f9242..a9e46cf56f 100644
--- a/plugins/connection/lxc.py
+++ b/plugins/connection/lxc.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Joerg Thalheim (!UNKNOWN)
@@ -82,7 +81,7 @@ class Connection(ConnectionBase):
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
self.container = _lxc.Container(self.container_name)
if self.container.state == "STOPPED":
- raise errors.AnsibleError("%s is not running" % self.container_name)
+ raise errors.AnsibleError(f"{self.container_name} is not running")
@staticmethod
def _communicate(pid, in_data, stdin, stdout, stderr):
@@ -144,10 +143,10 @@ class Connection(ConnectionBase):
read_stdin, write_stdin = os.pipe()
kwargs['stdin'] = self._set_nonblocking(read_stdin)
- self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
+ self._display.vvv(f"EXEC {local_cmd}", host=self.container_name)
pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
if pid == -1:
- msg = "failed to attach to container %s" % self.container_name
+ msg = f"failed to attach to container {self.container_name}"
raise errors.AnsibleError(msg)
write_stdout = os.close(write_stdout)
@@ -174,18 +173,18 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
if not os.path.exists(in_path):
- msg = "file or module does not exist: %s" % in_path
+ msg = f"file or module does not exist: {in_path}"
raise errors.AnsibleFileNotFound(msg)
try:
src_file = open(in_path, "rb")
except IOError:
traceback.print_exc()
- raise errors.AnsibleError("failed to open input file to %s" % in_path)
+ raise errors.AnsibleError(f"failed to open input file to {in_path}")
try:
def write_file(args):
with open(out_path, 'wb+') as dst_file:
@@ -194,7 +193,7 @@ class Connection(ConnectionBase):
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
- msg = "failed to transfer file to %s" % out_path
+ msg = f"failed to transfer file to {out_path}"
raise errors.AnsibleError(msg)
finally:
src_file.close()
@@ -202,7 +201,7 @@ class Connection(ConnectionBase):
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
@@ -210,7 +209,7 @@ class Connection(ConnectionBase):
dst_file = open(out_path, "wb")
except IOError:
traceback.print_exc()
- msg = "failed to open output file %s" % out_path
+ msg = f"failed to open output file {out_path}"
raise errors.AnsibleError(msg)
try:
def write_file(args):
@@ -225,7 +224,7 @@ class Connection(ConnectionBase):
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
- msg = "failed to transfer file from %s to %s" % (in_path, out_path)
+ msg = f"failed to transfer file from {in_path} to {out_path}"
raise errors.AnsibleError(msg)
finally:
dst_file.close()
diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py
index 739708eebd..2cc774a1d4 100644
--- a/plugins/connection/lxd.py
+++ b/plugins/connection/lxd.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Matt Clay (@mattclay)
@@ -33,6 +32,15 @@ options:
vars:
- name: ansible_executable
- name: ansible_lxd_executable
+ lxd_become_method:
+ description:
+ - Become command used to switch to a non-root user.
+ - Is only used when O(remote_user) is not V(root).
+ type: str
+ default: /bin/su
+ vars:
+ - name: lxd_become_method
+ version_added: 10.4.0
remote:
description:
- Name of the LXD remote to use.
@@ -41,6 +49,22 @@ options:
vars:
- name: ansible_lxd_remote
version_added: 2.0.0
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ default: root
+ vars:
+ - name: ansible_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ keyword:
+ - name: remote_user
+ version_added: 10.4.0
project:
description:
- Name of the LXD project to use.
@@ -64,7 +88,6 @@ class Connection(ConnectionBase):
transport = 'community.general.lxd'
has_pipelining = True
- default_user = 'root'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -74,9 +97,6 @@ class Connection(ConnectionBase):
except ValueError:
raise AnsibleError("lxc command not found in PATH")
- if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
- self._display.warning('lxd does not support remote_user, using default: root')
-
def _host(self):
""" translate remote_addr to lxd (short) hostname """
return self.get_option("remote_addr").split(".", 1)[0]
@@ -86,26 +106,41 @@ class Connection(ConnectionBase):
super(Connection, self)._connect()
if not self._connected:
- self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host())
+ self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host())
self._connected = True
+ def _build_command(self, cmd) -> list[str]:
+ """build the command to execute on the lxd host"""
+
+ exec_cmd = [self._lxc_cmd]
+
+ if self.get_option("project"):
+ exec_cmd.extend(["--project", self.get_option("project")])
+
+ exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"])
+
+ if self.get_option("remote_user") != "root":
+ self._display.vvv(
+ f"INFO: Running as non-root user: {self.get_option('remote_user')}, "
+ f"trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}",
+ host=self._host(),
+ )
+ exec_cmd.extend(
+ [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"]
+ )
+
+ exec_cmd.extend([self.get_option("executable"), "-c", cmd])
+
+ return exec_cmd
+
def exec_command(self, cmd, in_data=None, sudoable=True):
""" execute a command on the lxd host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- self._display.vvv(u"EXEC {0}".format(cmd), host=self._host())
+ self._display.vvv(f"EXEC {cmd}", host=self._host())
- local_cmd = [self._lxc_cmd]
- if self.get_option("project"):
- local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "exec",
- "%s:%s" % (self.get_option("remote"), self._host()),
- "--",
- self.get_option("executable"), "-c", cmd
- ])
-
- self._display.vvvvv(u"EXEC {0}".format(local_cmd), host=self._host())
+ local_cmd = self._build_command(cmd)
+ self._display.vvvvv(f"EXEC {local_cmd}", host=self._host())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -116,33 +151,73 @@ class Connection(ConnectionBase):
stdout = to_text(stdout)
stderr = to_text(stderr)
- self._display.vvvvv(u"EXEC lxc output: {0} {1}".format(stdout, stderr), host=self._host())
+ self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host())
if "is not running" in stderr:
- raise AnsibleConnectionFailure("instance not running: %s" % self._host())
+ raise AnsibleConnectionFailure(f"instance not running: {self._host()}")
if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found":
- raise AnsibleConnectionFailure("instance not found: %s" % self._host())
+ raise AnsibleConnectionFailure(f"instance not found: {self._host()}")
return process.returncode, stdout, stderr
+ def _get_remote_uid_gid(self) -> tuple[int, int]:
+ """Get the user and group ID of 'remote_user' from the instance."""
+
+ rc, uid_out, err = self.exec_command("/bin/id -u")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}"
+ )
+ uid = uid_out.strip()
+
+ rc, gid_out, err = self.exec_command("/bin/id -g")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}"
+ )
+ gid = gid_out.strip()
+
+ return int(uid), int(gid)
+
def put_file(self, in_path, out_path):
""" put a file from local to lxd """
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host())
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host())
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+ raise AnsibleFileNotFound(f"input path is not a file: {in_path}")
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "file", "push",
- in_path,
- "%s:%s/%s" % (self.get_option("remote"), self._host(), out_path)
- ])
+
+ if self.get_option("remote_user") != "root":
+ uid, gid = self._get_remote_uid_gid()
+ local_cmd.extend(
+ [
+ "file",
+ "push",
+ "--uid",
+ str(uid),
+ "--gid",
+ str(gid),
+ in_path,
+ f"{self.get_option('remote')}:{self._host()}/{out_path}",
+ ]
+ )
+ else:
+ local_cmd.extend(
+ [
+ "file",
+ "push",
+ in_path,
+ f"{self.get_option('remote')}:{self._host()}/{out_path}",
+ ]
+ )
+
+ self._display.vvvvv(f"PUT {local_cmd}", host=self._host())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
@@ -153,14 +228,14 @@ class Connection(ConnectionBase):
""" fetch a file from lxd to local """
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host())
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host())
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"file", "pull",
- "%s:%s/%s" % (self.get_option("remote"), self._host(), in_path),
+ f"{self.get_option('remote')}:{self._host()}/{in_path}",
out_path
])
diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py
index c3ad65b18b..5a9963df2d 100644
--- a/plugins/connection/qubes.py
+++ b/plugins/connection/qubes.py
@@ -8,8 +8,7 @@
#
# Written by: Kushal Das (https://github.com/kushaldas)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
@@ -77,7 +76,7 @@ class Connection(ConnectionBase):
"""
display.vvvv("CMD: ", cmd)
if not cmd.endswith("\n"):
- cmd = cmd + "\n"
+ cmd = f"{cmd}\n"
local_cmd = []
# For dom0
@@ -94,7 +93,7 @@ class Connection(ConnectionBase):
display.vvvv("Local cmd: ", local_cmd)
- display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+ display.vvv(f"RUN {local_cmd}", host=self._remote_vmname)
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -113,42 +112,42 @@ class Connection(ConnectionBase):
"""Run specified command in a running QubesVM """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- display.vvvv("CMD IS: %s" % cmd)
+ display.vvvv(f"CMD IS: {cmd}")
rc, stdout, stderr = self._qubes(cmd)
- display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr))
+ display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}")
return rc, stdout, stderr
def put_file(self, in_path, out_path):
""" Place a local file located in 'in_path' inside VM at 'out_path' """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname)
with open(in_path, "rb") as fobj:
source_data = fobj.read()
- retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+ retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data, "qubes.VMRootShell")
# if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
# hope it will have appropriate permissions
if retcode == 127:
- retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+ retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data)
if retcode != 0:
- raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+ raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}')
def fetch_file(self, in_path, out_path):
"""Obtain file specified via 'in_path' from the container and place it at 'out_path' """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname)
# We are running in dom0
- cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
+ cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, f"cat {in_path}"]
with open(out_path, "wb") as fobj:
p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
p.communicate()
if p.returncode != 0:
- raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
+ raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}')
def close(self):
""" Closing the connection """
diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py
index 316a15aaa8..f826741926 100644
--- a/plugins/connection/saltstack.py
+++ b/plugins/connection/saltstack.py
@@ -7,8 +7,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Michael Scherer (@mscherer)
@@ -59,11 +58,11 @@ class Connection(ConnectionBase):
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- self._display.vvv("EXEC %s" % cmd, host=self.host)
+ self._display.vvv(f"EXEC {cmd}", host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
- res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
+ res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"])
if self.host not in res:
- raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
+ raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct")
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@@ -81,7 +80,7 @@ class Connection(ConnectionBase):
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
@@ -93,7 +92,7 @@ class Connection(ConnectionBase):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py
new file mode 100644
index 0000000000..92ffec52b3
--- /dev/null
+++ b/plugins/connection/wsl.py
@@ -0,0 +1,795 @@
+# -*- coding: utf-8 -*-
+# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen)
+# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan
+# Copyright (c) 2025 Rui Lopes (@rgl)
+# Copyright (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Rui Lopes (@rgl)
+name: wsl
+short_description: Run tasks in WSL distribution using wsl.exe CLI using SSH
+requirements:
+ - paramiko
+description:
+ - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI using SSH.
+ - Uses the Python SSH implementation (Paramiko) to connect to the WSL host.
+version_added: "10.6.0"
+options:
+ remote_addr:
+ description:
+ - Address of the remote target.
+ default: inventory_hostname
+ type: string
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_ssh_host
+ - name: ansible_paramiko_host
+ port:
+ description: Remote port to connect to.
+ type: int
+ default: 22
+ ini:
+ - section: defaults
+ key: remote_port
+ - section: paramiko_connection
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ - name: ANSIBLE_REMOTE_PARAMIKO_PORT
+ vars:
+ - name: ansible_port
+ - name: ansible_ssh_port
+ - name: ansible_paramiko_port
+ keyword:
+ - name: port
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ - name: ansible_paramiko_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ - name: ANSIBLE_PARAMIKO_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ - section: paramiko_connection
+ key: remote_user
+ keyword:
+ - name: remote_user
+ password:
+ description:
+ - Secret used to either login the SSH server or as a passphrase for SSH keys that require it.
+ - Can be set from the CLI with the C(--ask-pass) option.
+ type: string
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ - name: ansible_paramiko_pass
+ - name: ansible_paramiko_password
+ use_rsa_sha2_algorithms:
+ description:
+ - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys.
+ - On paramiko versions older than 2.9, this only affects hostkeys.
+ - For behavior matching paramiko<2.9 set this to V(false).
+ vars:
+ - name: ansible_paramiko_use_rsa_sha2_algorithms
+ ini:
+ - {key: use_rsa_sha2_algorithms, section: paramiko_connection}
+ env:
+ - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
+ default: true
+ type: boolean
+ host_key_auto_add:
+ description: "Automatically add host keys to C(~/.ssh/known_hosts)."
+ env:
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD
+ ini:
+ - key: host_key_auto_add
+ section: paramiko_connection
+ type: boolean
+ look_for_keys:
+ default: true
+ description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)."
+ env:
+ - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+ proxy_command:
+ default: ""
+ description:
+ - Proxy information for running the connection through a jumphost.
+ - This option is supported by paramiko version 1.9.0 or newer.
+ type: string
+ env:
+ - name: ANSIBLE_PARAMIKO_PROXY_COMMAND
+ ini:
+ - {key: proxy_command, section: paramiko_connection}
+ vars:
+ - name: ansible_paramiko_proxy_command
+ record_host_keys:
+ default: true
+ description: "Save the host keys to a file."
+ env:
+ - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS
+ ini:
+ - section: paramiko_connection
+ key: record_host_keys
+ type: boolean
+ host_key_checking:
+ description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect
+ to the host."
+ type: boolean
+ default: true
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
+ ini:
+ - section: defaults
+ key: host_key_checking
+ - section: paramiko_connection
+ key: host_key_checking
+ vars:
+ - name: ansible_host_key_checking
+ - name: ansible_ssh_host_key_checking
+ - name: ansible_paramiko_host_key_checking
+ use_persistent_connections:
+ description: "Toggles the use of persistence for connections."
+ type: boolean
+ default: false
+ env:
+ - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
+ ini:
+ - section: defaults
+ key: use_persistent_connections
+ banner_timeout:
+ type: float
+ default: 30
+ description:
+ - Configures, in seconds, the amount of time to wait for the SSH banner to be presented.
+ - This option is supported by paramiko version 1.15.0 or newer.
+ ini:
+ - section: paramiko_connection
+ key: banner_timeout
+ env:
+ - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT
+ timeout:
+ type: int
+ default: 10
+ description:
+ - Number of seconds until the plugin gives up on failing to establish a TCP connection.
+ - This option is supported by paramiko version 2.2.0 or newer.
+ ini:
+ - section: defaults
+ key: timeout
+ - section: ssh_connection
+ key: timeout
+ - section: paramiko_connection
+ key: timeout
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_SSH_TIMEOUT
+ - name: ANSIBLE_PARAMIKO_TIMEOUT
+ vars:
+ - name: ansible_ssh_timeout
+ - name: ansible_paramiko_timeout
+ cli:
+ - name: timeout
+ lock_file_timeout:
+ type: int
+ default: 60
+ description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys.
+ vars:
+ - name: ansible_lock_file_timeout
+ env:
+ - name: ANSIBLE_LOCK_FILE_TIMEOUT
+ private_key_file:
+ description:
+ - Path to private key file to use for authentication.
+ type: path
+ ini:
+ - section: defaults
+ key: private_key_file
+ - section: paramiko_connection
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ - name: ansible_ssh_private_key_file
+ - name: ansible_paramiko_private_key_file
+ cli:
+ - name: private_key_file
+ option: "--private-key"
+ user_known_hosts_file:
+ description:
+ - Path to the user known hosts file.
+ - Used to verify the ssh hosts keys.
+ type: path
+ default: ~/.ssh/known_hosts
+ ini:
+ - section: paramiko_connection
+ key: user_known_hosts_file
+ vars:
+ - name: ansible_paramiko_user_known_hosts_file
+ wsl_distribution:
+ description:
+ - WSL distribution name.
+ type: string
+ required: true
+ vars:
+ - name: wsl_distribution
+ wsl_user:
+ description:
+ - WSL distribution user.
+ type: string
+ vars:
+ - name: wsl_user
+ become_user:
+ description:
+ - WSL distribution user.
+ type: string
+ default: root
+ vars:
+ - name: become_user
+ - name: ansible_become_user
+ become:
+ description:
+ - Whether to use the user defined by O(become_user).
+ type: bool
+ default: false
+ vars:
+ - name: become
+ - name: ansible_become
+"""
+
+EXAMPLES = r"""
+# ------------------------
+# Inventory: inventory.yml
+# ------------------------
+---
+all:
+ children:
+ wsl:
+ hosts:
+ example-wsl-ubuntu:
+ ansible_host: 10.0.0.10
+ wsl_distribution: ubuntu
+ wsl_user: ubuntu
+ vars:
+ ansible_connection: community.general.wsl
+ ansible_user: vagrant
+# ----------------------
+# Playbook: playbook.yml
+# ----------------------
+---
+- name: WSL Example
+ hosts: wsl
+ gather_facts: true
+ become: true
+ tasks:
+ - name: Ping
+ ansible.builtin.ping:
+ - name: Id (with become false)
+ become: false
+ changed_when: false
+ args:
+ executable: /bin/bash
+ ansible.builtin.shell: |
+ exec 2>&1
+ set -x
+ echo "$0"
+ pwd
+ id
+ - name: Id (with become true)
+ changed_when: false
+ args:
+ executable: /bin/bash
+ ansible.builtin.shell: |
+ exec 2>&1
+ set -x
+ echo "$0"
+ pwd
+ id
+ - name: Reboot
+ ansible.builtin.reboot:
+ boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope
+"""
+
+import io
+import os
+import pathlib
+import shlex
+import socket
+import tempfile
+import traceback
+import typing as t
+
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+)
+from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe
+from binascii import hexlify
+from subprocess import list2cmdline
+
+try:
+ import paramiko
+ PARAMIKO_IMPORT_ERR = None
+except ImportError:
+ paramiko = None
+ PARAMIKO_IMPORT_ERR = traceback.format_exc()
+
+
+if t.TYPE_CHECKING and PARAMIKO_IMPORT_ERR is None:
+ from paramiko import MissingHostKeyPolicy
+ from paramiko.client import SSHClient
+ from paramiko.pkey import PKey
+else:
+ MissingHostKeyPolicy: type = object
+ SSHClient: type = object
+ PKey: type = object
+
+
+display = Display()
+
+
+def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str:
+ msg = f"""
+ paramiko: The authenticity of host '{hostname}' can't be established.
+ The {ktype} key fingerprint is {fingerprint}.
+ Are you sure you want to continue connecting (yes/no)?
+ """
+ return msg
+
+
+class MyAddPolicy(MissingHostKeyPolicy):
+ """
+ Based on AutoAddPolicy in paramiko so we can determine when keys are added
+
+ and also prompt for input.
+
+ Policy for automatically adding the hostname and new host key to the
+ local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
+ """
+
+ def __init__(self, connection: Connection) -> None:
+ self.connection = connection
+ self._options = connection._options
+
+ def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None:
+
+ if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))):
+
+ fingerprint = to_text(hexlify(key.get_fingerprint()))
+ ktype = key.get_name()
+
+ if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
+ # don't print the prompt string since the user cannot respond
+ # to the question anyway
+ raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92])
+
+ inp = to_text(
+ display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False),
+ errors='surrogate_or_strict'
+ )
+
+ if inp.lower() not in ['yes', 'y', '']:
+ raise AnsibleError('host connection rejected by user')
+
+ key._added_by_ansible_this_time = True
+
+ # existing implementation below:
+ client._host_keys.add(hostname, key.get_name(), key)
+
+ # host keys are actually saved in close() function below
+ # in order to control ordering.
+
+
+class Connection(ConnectionBase):
+ """ SSH based connections (paramiko) to WSL """
+
+ transport = 'community.general.wsl'
+ _log_channel: str | None = None
+
+ def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ def _set_log_channel(self, name: str) -> None:
+ """ Mimic paramiko.SSHClient.set_log_channel """
+ self._log_channel = name
+
+ def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]:
+ proxy_command = self.get_option('proxy_command') or None
+
+ sock_kwarg = {}
+ if proxy_command:
+ replacers: t.Dict[str, str] = {
+ '%h': self.get_option('remote_addr'),
+ '%p': str(port),
+ '%r': self.get_option('remote_user')
+ }
+ for find, replace in replacers.items():
+ proxy_command = proxy_command.replace(find, replace)
+ try:
+ sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
+ display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr'))
+ except AttributeError:
+ display.warning('Paramiko ProxyCommand support unavailable. '
+ 'Please upgrade to Paramiko 1.9.0 or newer. '
+ 'Not using configured ProxyCommand')
+
+ return sock_kwarg
+
+ def _connect(self) -> Connection:
+ """ activates the connection object """
+
+ if PARAMIKO_IMPORT_ERR is not None:
+ raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}')
+
+ port = self.get_option('port')
+ display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}',
+ host=self.get_option('remote_addr'))
+
+ ssh = paramiko.SSHClient()
+
+ # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently
+ # is keeping or omitting rsa-sha2 algorithms
+ # default_keys: t.Tuple[str] = ()
+ paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ())
+ paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ())
+ use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms')
+ disabled_algorithms: t.Dict[str, t.Iterable[str]] = {}
+ if not use_rsa_sha2_algorithms:
+ if paramiko_preferred_pubkeys:
+ disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a)
+ if paramiko_preferred_hostkeys:
+ disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a)
+
+ # override paramiko's default logger name
+ if self._log_channel is not None:
+ ssh.set_log_channel(self._log_channel)
+
+ self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file'))
+
+ if self.get_option('host_key_checking'):
+ for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile):
+ try:
+ ssh.load_system_host_keys(ssh_known_hosts)
+ break
+ except IOError:
+ pass # file was not found, but not required to function
+ except paramiko.hostkeys.InvalidHostKey as e:
+ raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
+ try:
+ ssh.load_system_host_keys()
+ except paramiko.hostkeys.InvalidHostKey as e:
+ raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
+
+ ssh_connect_kwargs = self._parse_proxy_command(port)
+ ssh.set_missing_host_key_policy(MyAddPolicy(self))
+ conn_password = self.get_option('password')
+ allow_agent = True
+
+ if conn_password is not None:
+ allow_agent = False
+
+ try:
+ key_filename = None
+ if self.get_option('private_key_file'):
+ key_filename = os.path.expanduser(self.get_option('private_key_file'))
+
+ # paramiko 2.2 introduced auth_timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
+ ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout')
+
+ # paramiko 1.15 introduced banner timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'):
+ ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout')
+
+ ssh.connect(
+ self.get_option('remote_addr').lower(),
+ username=self.get_option('remote_user'),
+ allow_agent=allow_agent,
+ look_for_keys=self.get_option('look_for_keys'),
+ key_filename=key_filename,
+ password=conn_password,
+ timeout=self.get_option('timeout'),
+ port=port,
+ disabled_algorithms=disabled_algorithms,
+ **ssh_connect_kwargs,
+ )
+ except paramiko.ssh_exception.BadHostKeyException as e:
+ raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}')
+ except paramiko.ssh_exception.AuthenticationException as e:
+ msg = f'Failed to authenticate: {e}'
+ raise AnsibleAuthenticationFailure(msg)
+ except Exception as e:
+ msg = to_text(e)
+ if u'PID check failed' in msg:
+ raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
+ elif u'Private key file is encrypted' in msg:
+ msg = (
+ f'ssh {self.get_option("remote_user")}@{self.get_option("remote_addr")}:{port} : '
+ f'{msg}\nTo connect as a different user, use -u .'
+ )
+ raise AnsibleConnectionFailure(msg)
+ else:
+ raise AnsibleConnectionFailure(msg)
+ self.ssh = ssh
+ self._connected = True
+ return self
+
+ def _any_keys_added(self) -> bool:
+ for hostname, keys in self.ssh._host_keys.items():
+ for keytype, key in keys.items():
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ return True
+ return False
+
+ def _save_ssh_host_keys(self, filename: str) -> None:
+ """
+ not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
+ don't complain about it :)
+ """
+
+ if not self._any_keys_added():
+ return
+
+ path = os.path.expanduser('~/.ssh')
+ makedirs_safe(path)
+
+ with open(filename, 'w') as f:
+ for hostname, keys in self.ssh._host_keys.items():
+ for keytype, key in keys.items():
+ # was f.write
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if not added_this_time:
+ f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+ for hostname, keys in self.ssh._host_keys.items():
+ for keytype, key in keys.items():
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ f.write(f'{hostname} {keytype} {key.get_base64()}\n')
+
+ def _build_wsl_command(self, cmd: str) -> str:
+ wsl_distribution = self.get_option('wsl_distribution')
+ become = self.get_option('become')
+ become_user = self.get_option('become_user')
+ if become and become_user:
+ wsl_user = become_user
+ else:
+ wsl_user = self.get_option('wsl_user')
+ args = ['wsl.exe', '--distribution', wsl_distribution]
+ if wsl_user:
+ args.extend(['--user', wsl_user])
+ args.extend(['--'])
+ args.extend(shlex.split(cmd))
+ if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'):
+ return shlex.join(args)
+ return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576
+
+ def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
+ """ run a command inside a WSL distribution """
+
+ cmd = self._build_wsl_command(cmd)
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ bufsize = 4096
+
+ try:
+ self.ssh.get_transport().set_keepalive(5)
+ chan = self.ssh.get_transport().open_session()
+ except Exception as e:
+ text_e = to_text(e)
+ msg = 'Failed to open session'
+ if text_e:
+ msg += f': {text_e}'
+ raise AnsibleConnectionFailure(to_native(msg))
+
+ display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr'))
+
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+
+ no_prompt_out = b''
+ no_prompt_err = b''
+ become_output = b''
+
+ try:
+ chan.exec_command(cmd)
+ if self.become and self.become.expect_prompt():
+ password_prompt = False
+ become_success = False
+ while not (become_success or password_prompt):
+ display.debug('Waiting for Privilege Escalation input')
+
+ chunk = chan.recv(bufsize)
+ display.debug(f'chunk is: {to_text(chunk)}')
+ if not chunk:
+ if b'unknown user' in become_output:
+ n_become_user = to_native(self.become.get_option('become_user'))
+ raise AnsibleError(f'user {n_become_user} does not exist')
+ else:
+ break
+ # raise AnsibleError('ssh connection closed waiting for password prompt')
+ become_output += chunk
+
+ # need to check every line because we might get lectured
+ # and we might get the middle of a line in a chunk
+ for line in become_output.splitlines(True):
+ if self.become.check_success(line):
+ become_success = True
+ break
+ elif self.become.check_password_prompt(line):
+ password_prompt = True
+ break
+
+ if password_prompt:
+ if self.become:
+ become_pass = self.become.get_option('become_pass')
+ chan.sendall(to_bytes(become_pass + '\n', errors='surrogate_or_strict'))
+ else:
+ raise AnsibleError('A password is required but none was supplied')
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
+
+ if in_data:
+ for i in range(0, len(in_data), bufsize):
+ chan.send(in_data[i:i + bufsize])
+ chan.shutdown_write()
+ elif in_data == b'':
+ chan.shutdown_write()
+
+ except socket.timeout:
+ raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}')
+
+ stdout = b''.join(chan.makefile('rb', bufsize))
+ stderr = b''.join(chan.makefile_stderr('rb', bufsize))
+ returncode = chan.recv_exit_status()
+
+ # NB the full english error message is:
+ # 'wsl.exe' is not recognized as an internal or external command,
+ # operable program or batch file.
+ if "'wsl.exe' is not recognized" in stderr.decode('utf-8'):
+ raise AnsibleError(
+ f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}')
+
+ return (returncode, no_prompt_out + stdout, no_prompt_err + stderr)
+
+ def put_file(self, in_path: str, out_path: str) -> None:
+ """ transfer a file from local to remote """
+
+ display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr'))
+ try:
+ with open(in_path, 'rb') as f:
+ data = f.read()
+ returncode, stdout, stderr = self.exec_command(
+ ' '.join([
+ self._shell.executable, '-c',
+ self._shell.quote(f'cat > {out_path}')]),
+ in_data=data,
+ sudoable=False)
+ if returncode != 0:
+ if 'cat: not found' in stderr.decode('utf-8'):
+ raise AnsibleError(
+ f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
+ raise AnsibleError(
+ f'{to_text(stdout)}\n{to_text(stderr)}')
+ except Exception as e:
+ raise AnsibleError(
+ f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}')
+
+ def fetch_file(self, in_path: str, out_path: str) -> None:
+ """ save a remote file to the specified path """
+
+ display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr'))
+ try:
+ returncode, stdout, stderr = self.exec_command(
+ ' '.join([
+ self._shell.executable, '-c',
+ self._shell.quote(f'cat {in_path}')]),
+ sudoable=False)
+ if returncode != 0:
+ if 'cat: not found' in stderr.decode('utf-8'):
+ raise AnsibleError(
+ f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
+ raise AnsibleError(
+ f'{to_text(stdout)}\n{to_text(stderr)}')
+ with open(out_path, 'wb') as f:
+ f.write(stdout)
+ except Exception as e:
+ raise AnsibleError(
+ f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}')
+
+ def reset(self) -> None:
+ """ reset the connection """
+
+ if not self._connected:
+ return
+ self.close()
+ self._connect()
+
+ def close(self) -> None:
+ """ terminate the connection """
+
+ if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
+ # add any new SSH host keys -- warning -- this could be slow
+ # (This doesn't acquire the connection lock because it needs
+ # to exclude only other known_hosts writers, not connections
+ # that are starting up.)
+ lockfile = os.path.basename(self.keyfile)
+ dirname = os.path.dirname(self.keyfile)
+ makedirs_safe(dirname)
+ tmp_keyfile_name = None
+ try:
+ with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')):
+ # just in case any were added recently
+
+ self.ssh.load_system_host_keys()
+ self.ssh._host_keys.update(self.ssh._system_host_keys)
+
+ # gather information about the current key file, so
+ # we can ensure the new file has the correct mode/owner
+
+ key_dir = os.path.dirname(self.keyfile)
+ if os.path.exists(self.keyfile):
+ key_stat = os.stat(self.keyfile)
+ mode = key_stat.st_mode & 0o777
+ uid = key_stat.st_uid
+ gid = key_stat.st_gid
+ else:
+ mode = 0o644
+ uid = os.getuid()
+ gid = os.getgid()
+
+ # Save the new keys to a temporary file and move it into place
+ # rather than rewriting the file. We set delete=False because
+ # the file will be moved into place rather than cleaned up.
+
+ with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile:
+ tmp_keyfile_name = tmp_keyfile.name
+ os.chmod(tmp_keyfile_name, mode)
+ os.chown(tmp_keyfile_name, uid, gid)
+ self._save_ssh_host_keys(tmp_keyfile_name)
+
+ os.rename(tmp_keyfile_name, self.keyfile)
+ except LockTimeout:
+ raise AnsibleError(
+ f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s')
+ except paramiko.hostkeys.InvalidHostKey as e:
+ raise AnsibleConnectionFailure(f'Invalid host key: {e.line}')
+ except Exception as e:
+ # unable to save keys, including scenario when key was invalid
+ # and caught earlier
+ raise AnsibleError(
+ f'error occurred while writing SSH host keys!\n{to_text(e)}')
+ finally:
+ if tmp_keyfile_name is not None:
+ pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True)
+
+ self.ssh.close()
+ self._connected = False
diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py
index ffddad0d46..baca9312b3 100644
--- a/plugins/connection/zone.py
+++ b/plugins/connection/zone.py
@@ -8,8 +8,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
DOCUMENTATION = r"""
author: Ansible Core Team
@@ -62,14 +61,14 @@ class Connection(ConnectionBase):
self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
if self.zone not in self.list_zones():
- raise AnsibleError("incorrect zone name %s" % self.zone)
+ raise AnsibleError(f"incorrect zone name {self.zone}")
@staticmethod
def _search_executable(executable):
try:
return get_bin_path(executable)
except ValueError:
- raise AnsibleError("%s command not found in PATH" % executable)
+ raise AnsibleError(f"{executable} command not found in PATH")
def list_zones(self):
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
@@ -94,7 +93,7 @@ class Connection(ConnectionBase):
# stdout, stderr = p.communicate()
path = process.stdout.readlines()[0].split(':')[3]
- return path + '/root'
+ return f"{path}/root"
def _connect(self):
""" connect to the zone; nothing to do here """
@@ -117,7 +116,7 @@ class Connection(ConnectionBase):
local_cmd = [self.zlogin_cmd, self.zone, cmd]
local_cmd = map(to_bytes, local_cmd)
- display.vvv("EXEC %s" % (local_cmd), host=self.zone)
+ display.vvv(f"EXEC {local_cmd}", host=self.zone)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -140,7 +139,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -149,7 +148,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to zone """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -159,27 +158,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from zone to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("zone connection requires dd command in the zone")
@@ -191,10 +190,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py
index b462fcacb4..cf7255b465 100644
--- a/plugins/doc_fragments/alicloud.py
+++ b/plugins/doc_fragments/alicloud.py
@@ -11,99 +11,89 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Alicloud only documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
alicloud_access_key:
description:
- - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY),
- E(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID)
+ is used instead.
aliases: ['access_key_id', 'access_key']
type: str
alicloud_secret_key:
description:
- - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY),
- E(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY)
+ is used instead.
aliases: ['secret_access_key', 'secret_key']
type: str
alicloud_region:
description:
- - The Alibaba Cloud region to use. If not specified then the value of environment variable
- E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID) will be used instead.
+ - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID)
+ is used instead.
aliases: ['region', 'region_id']
required: true
type: str
alicloud_security_token:
description:
- - The Alibaba Cloud security token. If not specified then the value of environment variable
- E(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ - The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN)
+ is used instead.
aliases: ['security_token']
type: str
alicloud_assume_role:
description:
- - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
- - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name),
- C(alicloud_assume_role_session_expiration) and C(alicloud_assume_role_policy).
+ - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials.
+ - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration)
+ and C(alicloud_assume_role_policy).
type: dict
aliases: ['assume_role']
alicloud_assume_role_arn:
description:
- - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string,
- it does not perform role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN).
- ansible will execute with provided credentials.
+ - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform
+ role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials.
aliases: ['assume_role_arn']
type: str
alicloud_assume_role_session_name:
description:
- - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
- 'ansible' is passed to the AssumeRole call as session name. It supports environment variable
- E(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
+ - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, 'ansible' is passed to
+ the AssumeRole call as session name. It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
aliases: ['assume_role_session_name']
type: str
alicloud_assume_role_session_expiration:
description:
- - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming
- role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default
- value). It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming role expires. Valid
+ value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default value). It supports environment
+ variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
aliases: ['assume_role_session_expiration']
type: int
ecs_role_name:
description:
- - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control'
- section of the Alibaba Cloud console.
- - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the
- metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS
- credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding
- credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage.
+ - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' section
+ of the Alibaba Cloud console.
+ - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata
+ U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential.
+ This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead
+ these are leased on-the-fly by Ansible which reduces the chance of leakage.
aliases: ['role_name']
type: str
profile:
description:
- - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
- E(ALICLOUD_PROFILE) environment variable.
+ - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the E(ALICLOUD_PROFILE)
+ environment variable.
type: str
shared_credentials_file:
description:
- This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE)
environment variable.
- - If this is not set and a profile is specified, C(~/.aliyun/config.json) will be used.
+ - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used.
type: str
author:
- - "He Guimin (@xiaozhu36)"
+ - "He Guimin (@xiaozhu36)"
requirements:
- - "Python >= 3.6"
+ - "Python >= 3.6"
notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence
- E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID),
- E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY),
- E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID),
- E(ALICLOUD_SECURITY_TOKEN),
- E(ALICLOUD_ECS_ROLE_NAME),
- E(ALICLOUD_SHARED_CREDENTIALS_FILE),
- E(ALICLOUD_PROFILE),
- E(ALICLOUD_ASSUME_ROLE_ARN),
- E(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
- E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
- - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can be typically be used to specify the
- Alicloud region, when required, but this can also be configured in the footmark config file
-'''
+ - If parameters are not set within the module, the following environment variables can be used in decreasing order of precedence
+ E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY), E(ALICLOUD_REGION)
+ or E(ALICLOUD_REGION_ID), E(ALICLOUD_SECURITY_TOKEN), E(ALICLOUD_ECS_ROLE_NAME), E(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ E(ALICLOUD_PROFILE), E(ALICLOUD_ASSUME_ROLE_ARN), E(ALICLOUD_ASSUME_ROLE_SESSION_NAME), E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can typically be used to specify the Alicloud region, when required, but
+ this can also be configured in the footmark config file.
+"""
diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py
index 9b8488e0a5..18b02575c4 100644
--- a/plugins/doc_fragments/attributes.py
+++ b/plugins/doc_fragments/attributes.py
@@ -11,83 +11,83 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options: {}
attributes:
- check_mode:
- description: Can run in C(check_mode) and return changed status prediction without modifying target.
- diff_mode:
- description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
-'''
+ check_mode:
+ description: Can run in C(check_mode) and return changed status prediction without modifying target.
+ diff_mode:
+ description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+"""
- PLATFORM = r'''
+ PLATFORM = r"""
options: {}
attributes:
- platform:
- description: Target OS/families that can be operated against.
- support: N/A
-'''
+ platform:
+ description: Target OS/families that can be operated against.
+ support: N/A
+"""
# Should be used together with the standard fragment
INFO_MODULE = r'''
options: {}
attributes:
- check_mode:
- support: full
- details:
- - This action does not modify state.
- diff_mode:
- support: N/A
- details:
- - This action does not modify state.
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
'''
- CONN = r'''
+ CONN = r"""
options: {}
attributes:
- become:
- description: Is usable alongside C(become) keywords.
- connection:
- description: Uses the target's configured connection information to execute code on it.
- delegation:
- description: Can be used in conjunction with C(delegate_to) and related keywords.
-'''
+ become:
+ description: Is usable alongside C(become) keywords.
+ connection:
+ description: Uses the target's configured connection information to execute code on it.
+ delegation:
+ description: Can be used in conjunction with C(delegate_to) and related keywords.
+"""
- FACTS = r'''
+ FACTS = r"""
options: {}
attributes:
- facts:
- description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
-'''
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that updates existing host facts.
+"""
# Should be used together with the standard fragment and the FACTS fragment
FACTS_MODULE = r'''
options: {}
attributes:
- check_mode:
- support: full
- details:
- - This action does not modify state.
- diff_mode:
- support: N/A
- details:
- - This action does not modify state.
- facts:
- support: full
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
'''
- FILES = r'''
+ FILES = r"""
options: {}
attributes:
- safe_file_operations:
- description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
-'''
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
+"""
- FLOW = r'''
+ FLOW = r"""
options: {}
attributes:
- action:
- description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
- async:
- description: Supports being used with the C(async) keyword.
-'''
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+ async:
+ description: Supports being used with the C(async) keyword.
+"""
diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py
index 77d127c629..438435a6a3 100644
--- a/plugins/doc_fragments/auth_basic.py
+++ b/plugins/doc_fragments/auth_basic.py
@@ -10,7 +10,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
api_url:
description:
@@ -29,4 +29,4 @@ options:
- Whether or not to validate SSL certs when supplying a HTTPS endpoint.
type: bool
default: true
-'''
+"""
diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py
index 0a66ea0a68..65c4c47b51 100644
--- a/plugins/doc_fragments/bitbucket.py
+++ b/plugins/doc_fragments/bitbucket.py
@@ -11,34 +11,34 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
client_id:
description:
- The OAuth consumer key.
- - If not set the environment variable E(BITBUCKET_CLIENT_ID) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used.
type: str
client_secret:
description:
- The OAuth consumer secret.
- - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used.
type: str
user:
description:
- The username.
- - If not set the environment variable E(BITBUCKET_USERNAME) will be used.
+ - If not set the environment variable E(BITBUCKET_USERNAME) is used.
- O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before.
type: str
version_added: 4.0.0
- aliases: [ username ]
+ aliases: [username]
password:
description:
- The App password.
- - If not set the environment variable E(BITBUCKET_PASSWORD) will be used.
+ - If not set the environment variable E(BITBUCKET_PASSWORD) is used.
type: str
version_added: 4.0.0
notes:
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
- If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
-'''
+"""
diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py
index d4cf119958..0703971a2e 100644
--- a/plugins/doc_fragments/consul.py
+++ b/plugins/doc_fragments/consul.py
@@ -15,7 +15,7 @@ class ModuleDocFragment:
options:
host:
description:
- - Host of the consul agent, defaults to V(localhost).
+ - Host of the Consul agent.
default: localhost
type: str
port:
@@ -25,18 +25,18 @@ options:
default: 8500
scheme:
description:
- - The protocol scheme on which the consul agent is running.
- Defaults to V(http) and can be set to V(https) for secure connections.
+ - The protocol scheme on which the Consul agent is running. Defaults to V(http) and can be set to V(https) for secure
+ connections.
default: http
type: str
validate_certs:
type: bool
description:
- - Whether to verify the TLS certificate of the consul agent.
+ - Whether to verify the TLS certificate of the Consul agent.
default: true
ca_path:
description:
- - The CA bundle to use for https connections
+ - The CA bundle to use for https connections.
type: str
"""
diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py
index f4d6244540..890c4d741e 100644
--- a/plugins/doc_fragments/dimensiondata.py
+++ b/plugins/doc_fragments/dimensiondata.py
@@ -14,8 +14,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Dimension Data doc fragment
- DOCUMENTATION = r'''
-
+ DOCUMENTATION = r"""
options:
region:
description:
@@ -29,12 +28,12 @@ options:
mcp_user:
description:
- The username used to authenticate to the CloudControl API.
- - If not specified, will fall back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
+ - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
type: str
mcp_password:
description:
- The password used to authenticate to the CloudControl API.
- - If not specified, will fall back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+ - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- Required if O(mcp_user) is specified.
type: str
location:
@@ -44,8 +43,8 @@ options:
required: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only be used on private instances of the CloudControl API that use self-signed certificates.
type: bool
default: true
-'''
+"""
diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py
index 051d8ca1d3..d3ab3b9783 100644
--- a/plugins/doc_fragments/dimensiondata_wait.py
+++ b/plugins/doc_fragments/dimensiondata_wait.py
@@ -14,8 +14,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Dimension Data ("wait-for-completion" parameters) doc fragment
- DOCUMENTATION = r'''
-
+ DOCUMENTATION = r"""
options:
wait:
description:
@@ -34,4 +33,4 @@ options:
- Only applicable if O(wait=true).
type: int
default: 2
-'''
+"""
diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py
index f89ec91448..5d01c8323e 100644
--- a/plugins/doc_fragments/django.py
+++ b/plugins/doc_fragments/django.py
@@ -8,7 +8,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
venv:
description:
@@ -18,13 +18,13 @@ options:
settings:
description:
- Specifies the settings module to use.
- - The value will be passed as is to the C(--settings) argument in C(django-admin).
+ - The value is passed as is to the C(--settings) argument in C(django-admin).
type: str
required: true
pythonpath:
description:
- Adds the given filesystem path to the Python import search path.
- - The value will be passed as is to the C(--pythonpath) argument in C(django-admin).
+ - The value is passed as is to the C(--pythonpath) argument in C(django-admin).
type: path
traceback:
description:
@@ -43,20 +43,19 @@ options:
notes:
- The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed.
-
seealso:
- name: django-admin and manage.py in official Django documentation
description: >-
- Refer to this documentation for the builtin commands and options of C(django-admin).
- Please make sure that you select the right version of Django in the version selector on that page.
+ Refer to this documentation for the builtin commands and options of C(django-admin). Please make sure that you select
+ the right version of Django in the version selector on that page.
link: https://docs.djangoproject.com/en/5.0/ref/django-admin/
-'''
+"""
- DATABASE = r'''
+ DATABASE = r"""
options:
database:
description:
- Specify the database to be used.
type: str
default: default
-'''
+"""
diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py
index d685c510d2..14dc7bc129 100644
--- a/plugins/doc_fragments/emc.py
+++ b/plugins/doc_fragments/emc.py
@@ -10,33 +10,24 @@ __metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
-options:
- - See respective platform section for more details
-requirements:
- - See respective platform section for more details
-notes:
- - Ansible modules are available for EMC VNX.
-'''
-
# Documentation fragment for VNX (emc_vnx)
EMC_VNX = r'''
options:
- sp_address:
- description:
- - Address of the SP of target/secondary storage.
- type: str
- required: true
- sp_user:
- description:
- - Username for accessing SP.
- type: str
- default: sysadmin
- sp_password:
- description:
- - password for accessing SP.
- type: str
- default: sysadmin
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+      - Password for accessing SP.
+ type: str
+ default: sysadmin
requirements:
- An EMC VNX Storage device.
- storops (0.5.10 or greater). Install using C(pip install storops).
diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py
index c6434c0ced..48182ed35c 100644
--- a/plugins/doc_fragments/gitlab.py
+++ b/plugins/doc_fragments/gitlab.py
@@ -10,7 +10,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
requirements:
- requests (Python library U(https://pypi.org/project/requests/))
@@ -34,4 +34,4 @@ options:
- The CA certificates bundle to use to verify GitLab server certificate.
type: str
version_added: 8.1.0
-'''
+"""
diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py
index 606a2502a6..dadd6e78b3 100644
--- a/plugins/doc_fragments/hpe3par.py
+++ b/plugins/doc_fragments/hpe3par.py
@@ -10,26 +10,26 @@ __metaclass__ = type
class ModuleDocFragment(object):
# HPE 3PAR doc fragment
- DOCUMENTATION = '''
+ DOCUMENTATION = r"""
options:
- storage_system_ip:
- description:
- - The storage system IP address.
- type: str
- required: true
- storage_system_password:
- description:
- - The storage system password.
- type: str
- required: true
- storage_system_username:
- description:
- - The storage system user name.
- type: str
- required: true
+ storage_system_ip:
+ description:
+ - The storage system IP address.
+ type: str
+ required: true
+ storage_system_password:
+ description:
+ - The storage system password.
+ type: str
+ required: true
+ storage_system_username:
+ description:
+ - The storage system user name.
+ type: str
+ required: true
requirements:
- hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk).
- WSAPI service should be enabled on the 3PAR storage array.
notes:
- '''
+"""
diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py
index 8b9ae92b8f..ea54c80c09 100644
--- a/plugins/doc_fragments/hwc.py
+++ b/plugins/doc_fragments/hwc.py
@@ -10,56 +10,50 @@ __metaclass__ = type
class ModuleDocFragment(object):
# HWC doc fragment.
- DOCUMENTATION = '''
+ DOCUMENTATION = r"""
options:
- identity_endpoint:
- description:
- - The Identity authentication URL.
- type: str
- required: true
- user:
- description:
- - The user name to login with.
- - Currently only user names are supported, and not user IDs.
- type: str
- required: true
- password:
- description:
- - The password to login with.
- type: str
- required: true
- domain:
- description:
- - The name of the Domain to scope to (Identity v3).
- - Currently only domain names are supported, and not domain IDs.
- type: str
- required: true
- project:
- description:
- - The name of the Tenant (Identity v2) or Project (Identity v3).
- - Currently only project names are supported, and not project IDs.
- type: str
- required: true
- region:
- description:
- - The region to which the project belongs.
- type: str
- id:
- description:
- - The ID of resource to be managed.
- type: str
+ identity_endpoint:
+ description:
+ - The Identity authentication URL.
+ type: str
+ required: true
+ user:
+ description:
+ - The user name to login with.
+ - Currently only user names are supported, and not user IDs.
+ type: str
+ required: true
+ password:
+ description:
+ - The password to login with.
+ type: str
+ required: true
+ domain:
+ description:
+ - The name of the Domain to scope to (Identity v3).
+ - Currently only domain names are supported, and not domain IDs.
+ type: str
+ required: true
+ project:
+ description:
+ - The name of the Tenant (Identity v2) or Project (Identity v3).
+ - Currently only project names are supported, and not project IDs.
+ type: str
+ required: true
+ region:
+ description:
+ - The region to which the project belongs.
+ type: str
+ id:
+ description:
+ - The ID of resource to be managed.
+ type: str
notes:
- - For authentication, you can set identity_endpoint using the
- E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable.
- - For authentication, you can set user using the
- E(ANSIBLE_HWC_USER) environment variable.
- - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment
- variable.
- - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment
- variable.
- - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment
- variable.
+ - For authentication, you can set identity_endpoint using the E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable.
+ - For authentication, you can set user using the E(ANSIBLE_HWC_USER) environment variable.
+ - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment variable.
+ - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable.
+ - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable.
- For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable.
- - Environment variables values will only be used if the playbook values are
- not set.
-'''
+  - Environment variable values are only used when the playbook values are not set.
+"""
diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py
index 7783d9ca56..ca48ef2c4d 100644
--- a/plugins/doc_fragments/ibm_storage.py
+++ b/plugins/doc_fragments/ibm_storage.py
@@ -12,26 +12,25 @@ __metaclass__ = type
class ModuleDocFragment(object):
# ibm_storage documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- username:
- description:
- - Management user on the spectrum accelerate storage system.
- type: str
- required: true
- password:
- description:
- - Password for username on the spectrum accelerate storage system.
- type: str
- required: true
- endpoints:
- description:
- - The hostname or management IP of Spectrum Accelerate storage system.
- type: str
- required: true
+ username:
+ description:
+ - Management user on the Spectrum Accelerate storage system.
+ type: str
+ required: true
+ password:
+ description:
+ - Password for username on the Spectrum Accelerate storage system.
+ type: str
+ required: true
+ endpoints:
+ description:
+ - The hostname or management IP of Spectrum Accelerate storage system.
+ type: str
+ required: true
notes:
- - This module requires pyxcli python library.
- Use C(pip install pyxcli) in order to get pyxcli.
+  - This module requires the pyxcli Python library. Use C(pip install pyxcli) in order to get pyxcli.
requirements:
- pyxcli
-'''
+"""
diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py
index fc0ca02ac7..5dbebea846 100644
--- a/plugins/doc_fragments/influxdb.py
+++ b/plugins/doc_fragments/influxdb.py
@@ -11,72 +11,72 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Parameters for influxdb modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
hostname:
description:
- - The hostname or IP address on which InfluxDB server is listening.
+ - The hostname or IP address on which InfluxDB server is listening.
type: str
default: localhost
username:
description:
- - Username that will be used to authenticate against InfluxDB server.
+ - Username that is used to authenticate against InfluxDB server.
type: str
default: root
- aliases: [ login_username ]
+ aliases: [login_username]
password:
description:
- - Password that will be used to authenticate against InfluxDB server.
+ - Password that is used to authenticate against InfluxDB server.
type: str
default: root
- aliases: [ login_password ]
+ aliases: [login_password]
port:
description:
- - The port on which InfluxDB server is listening.
+ - The port on which InfluxDB server is listening.
type: int
default: 8086
path:
description:
- - The path on which InfluxDB server is accessible.
- - Only available when using python-influxdb >= 5.1.0.
+ - The path on which InfluxDB server is accessible.
+ - Only available when using python-influxdb >= 5.1.0.
type: str
default: ''
version_added: '0.2.0'
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
- - This should only set to V(false) used on personally controlled sites using self-signed certificates.
+ - If set to V(false), the SSL certificates are not validated.
+      - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
default: true
ssl:
description:
- - Use https instead of http to connect to InfluxDB server.
+ - Use https instead of http to connect to InfluxDB server.
type: bool
default: false
timeout:
description:
- - Number of seconds Requests will wait for client to establish a connection.
+ - Number of seconds Requests waits for client to establish a connection.
type: int
retries:
description:
- - Number of retries client will try before aborting.
- - V(0) indicates try until success.
- - Only available when using python-influxdb >= 4.1.0.
+ - Number of retries client performs before aborting.
+ - V(0) indicates try until success.
+ - Only available when using C(python-influxdb) >= 4.1.0.
type: int
default: 3
use_udp:
description:
- - Use UDP to connect to InfluxDB server.
+ - Use UDP to connect to InfluxDB server.
type: bool
default: false
udp_port:
description:
- - UDP port to connect to InfluxDB server.
+ - UDP port to connect to InfluxDB server.
type: int
default: 4444
proxies:
description:
- - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+ - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
type: dict
default: {}
-'''
+"""
diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py
index 7e091a94aa..63ea94b465 100644
--- a/plugins/doc_fragments/ipa.py
+++ b/plugins/doc_fragments/ipa.py
@@ -11,61 +11,66 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Parameters for FreeIPA/IPA modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
ipa_port:
description:
- - Port of FreeIPA / IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_PORT) will be used instead.
- - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set.
+ - Port of FreeIPA / IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead.
+ - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set.
type: int
default: 443
ipa_host:
description:
- - IP or hostname of IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_HOST) will be used instead.
- - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
- - The relevant entry needed in FreeIPA is the C(ipa-ca) entry.
- - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default value will be used.
+ - IP or hostname of IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead.
+ - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try
+ to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the C(ipa-ca) entry.
+ - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default
+ value is used.
type: str
default: ipa.example.com
ipa_user:
description:
- - Administrative account used on IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_USER) will be used instead.
- - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set.
+ - Administrative account used on IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead.
+ - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set.
type: str
default: admin
ipa_pass:
description:
- - Password of administrative user.
- - If the value is not specified in the task, the value of environment variable E(IPA_PASS) will be used instead.
- - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
- - If the environment variable E(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
- - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate.
- - If GSSAPI is not available, the usage of O(ipa_pass) is required.
+ - Password of administrative user.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead.
+ - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate
+ to the FreeIPA server.
+ - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos
+ keytab to authenticate.
+ - If GSSAPI is not available, the usage of O(ipa_pass) is required.
type: str
ipa_prot:
description:
- - Protocol used by IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_PROT) will be used instead.
- - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set.
+ - Protocol used by IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead.
+ - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set.
type: str
- choices: [ http, https ]
+ choices: [http, https]
default: https
validate_certs:
description:
- - This only applies if O(ipa_prot) is V(https).
- - If set to V(false), the SSL certificates will not be validated.
- - This should only set to V(false) used on personally controlled sites using self-signed certificates.
+ - This only applies if O(ipa_prot) is V(https).
+ - If set to V(false), the SSL certificates are not validated.
+      - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
default: true
ipa_timeout:
description:
- - Specifies idle timeout (in seconds) for the connection.
- - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) will be used instead.
- - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ - Specifies idle timeout (in seconds) for the connection.
+ - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead.
+ - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is
+ set.
type: int
default: 10
-'''
+"""
diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py
index 9b21ce52c9..75c458d5fc 100644
--- a/plugins/doc_fragments/keycloak.py
+++ b/plugins/doc_fragments/keycloak.py
@@ -11,69 +11,85 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- auth_keycloak_url:
- description:
- - URL to the Keycloak instance.
- type: str
- required: true
- aliases:
- - url
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
- auth_client_id:
- description:
- - OpenID Connect C(client_id) to authenticate to the API with.
- type: str
- default: admin-cli
+ auth_client_id:
+ description:
+ - OpenID Connect C(client_id) to authenticate to the API with.
+ type: str
+ default: admin-cli
- auth_realm:
- description:
- - Keycloak realm name to authenticate to for API access.
- type: str
+ auth_realm:
+ description:
+ - Keycloak realm name to authenticate to for API access.
+ type: str
- auth_client_secret:
- description:
- - Client Secret to use in conjunction with O(auth_client_id) (if required).
- type: str
+ auth_client_secret:
+ description:
+ - Client Secret to use in conjunction with O(auth_client_id) (if required).
+ type: str
- auth_username:
- description:
- - Username to authenticate for API access with.
- type: str
- aliases:
- - username
+ auth_username:
+ description:
+ - Username to authenticate for API access with.
+ type: str
+ aliases:
+ - username
- auth_password:
- description:
- - Password to authenticate for API access with.
- type: str
- aliases:
- - password
+ auth_password:
+ description:
+ - Password to authenticate for API access with.
+ type: str
+ aliases:
+ - password
- token:
- description:
- - Authentication token for Keycloak API.
- type: str
- version_added: 3.0.0
+ token:
+ description:
+ - Authentication token for Keycloak API.
+ type: str
+ version_added: 3.0.0
- validate_certs:
- description:
- - Verify TLS certificates (do not disable this in production).
- type: bool
- default: true
+ refresh_token:
+ description:
+ - Authentication refresh token for Keycloak API.
+ type: str
+ version_added: 10.3.0
- connection_timeout:
- description:
- - Controls the HTTP connections timeout period (in seconds) to Keycloak API.
- type: int
- default: 10
- version_added: 4.5.0
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: true
- http_agent:
- description:
- - Configures the HTTP User-Agent header.
- type: str
- default: Ansible
- version_added: 5.4.0
-'''
+ connection_timeout:
+ description:
+ - Controls the HTTP connections timeout period (in seconds) to Keycloak API.
+ type: int
+ default: 10
+ version_added: 4.5.0
+
+ http_agent:
+ description:
+ - Configures the HTTP User-Agent header.
+ type: str
+ default: Ansible
+ version_added: 5.4.0
+"""
+
+ ACTIONGROUP_KEYCLOAK = r"""
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.general.keycloak
+"""
diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py
index e11ab065d8..abdb32adb7 100644
--- a/plugins/doc_fragments/ldap.py
+++ b/plugins/doc_fragments/ldap.py
@@ -12,12 +12,17 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard LDAP documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
+notes:
+  - The default authentication settings attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with
+ the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to
+ modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
options:
bind_dn:
description:
- - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
- - If this is blank, we'll use an anonymous bind.
+ - A DN to bind with. Try to use a SASL bind with the EXTERNAL mechanism as default when this parameter is omitted.
+ - Use an anonymous bind if the parameter is blank.
type: str
bind_pw:
description:
@@ -57,7 +62,8 @@ options:
version_added: 2.0.0
server_uri:
description:
- - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
+ - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host,
+ and the port fields.
- The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
- Note that when using multiple URIs you cannot determine to which URI your client gets connected.
- For URIs containing additional fields, particularly when using commas, behavior is undefined.
@@ -65,12 +71,12 @@ options:
default: ldapi:///
start_tls:
description:
- - If true, we'll use the START_TLS LDAP extension.
+ - Use the START_TLS LDAP extension if set to V(true).
type: bool
default: false
validate_certs:
description:
- - If set to V(false), SSL certificates will not be validated.
+ - If set to V(false), SSL certificates are not validated.
- This should only be used on sites using self-signed certificates.
type: bool
default: true
@@ -84,11 +90,11 @@ options:
xorder_discovery:
description:
- Set the behavior on how to process Xordered DNs.
- - V(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
- - V(disable) will always use the DN unmodified (as passed by the O(dn) parameter).
- - V(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
+ - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN.
+ - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter).
+ - V(auto) only performs a search if the first RDN does not contain an index number (C({x})).
type: str
choices: ['enable', 'auto', 'disable']
default: auto
version_added: "6.4.0"
-'''
+"""
diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py
index eed6727c2a..85cdeb0f22 100644
--- a/plugins/doc_fragments/lxca_common.py
+++ b/plugins/doc_fragments/lxca_common.py
@@ -10,7 +10,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard Pylxca documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
@@ -18,19 +18,19 @@ author:
options:
login_user:
description:
- - The username for use in HTTP basic authentication.
+ - The username for use in HTTP basic authentication.
type: str
required: true
login_password:
description:
- - The password for use in HTTP basic authentication.
+ - The password for use in HTTP basic authentication.
type: str
required: true
auth_url:
description:
- - lxca HTTPS full web address.
+ - Lxca HTTPS full web address.
type: str
required: true
@@ -40,4 +40,4 @@ requirements:
notes:
- Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca).
- Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca).
-'''
+"""
diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py
index 8afc183a5c..4b9ea1ff52 100644
--- a/plugins/doc_fragments/manageiq.py
+++ b/plugins/doc_fragments/manageiq.py
@@ -11,7 +11,7 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard ManageIQ documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
manageiq_connection:
description:
@@ -34,20 +34,21 @@ options:
type: str
token:
description:
- - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed in.
+ - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed
+ in.
type: str
validate_certs:
description:
- Whether SSL certificates should be verified for HTTPS requests.
type: bool
default: true
- aliases: [ verify_ssl ]
+ aliases: [verify_ssl]
ca_cert:
description:
- The path to a CA bundle file or directory with certificates.
type: str
- aliases: [ ca_bundle_path ]
+ aliases: [ca_bundle_path]
requirements:
- 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
-'''
+"""
diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py
index 1571c211c9..68787e835c 100644
--- a/plugins/doc_fragments/nomad.py
+++ b/plugins/doc_fragments/nomad.py
@@ -11,48 +11,48 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- host:
- description:
- - FQDN of Nomad server.
- required: true
- type: str
- port:
- description:
- - Port of Nomad server.
- type: int
- default: 4646
- version_added: 8.0.0
- use_ssl:
- description:
- - Use TLS/SSL connection.
- type: bool
- default: true
- timeout:
- description:
- - Timeout (in seconds) for the request to Nomad.
- type: int
- default: 5
- validate_certs:
- description:
- - Enable TLS/SSL certificate validation.
- type: bool
- default: true
- client_cert:
- description:
- - Path of certificate for TLS/SSL.
- type: path
- client_key:
- description:
- - Path of certificate's private key for TLS/SSL.
- type: path
- namespace:
- description:
- - Namespace for Nomad.
- type: str
- token:
- description:
- - ACL token for authentication.
- type: str
-'''
+ host:
+ description:
+ - FQDN of Nomad server.
+ required: true
+ type: str
+ port:
+ description:
+ - Port of Nomad server.
+ type: int
+ default: 4646
+ version_added: 8.0.0
+ use_ssl:
+ description:
+ - Use TLS/SSL connection.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - Timeout (in seconds) for the request to Nomad.
+ type: int
+ default: 5
+ validate_certs:
+ description:
+ - Enable TLS/SSL certificate validation.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - Path of certificate for TLS/SSL.
+ type: path
+ client_key:
+ description:
+ - Path of certificate's private key for TLS/SSL.
+ type: path
+ namespace:
+ description:
+ - Namespace for Nomad.
+ type: str
+ token:
+ description:
+ - ACL token for authentication.
+ type: str
+"""
diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py
index a67c9e4dc1..6fb0e252c6 100644
--- a/plugins/doc_fragments/onepassword.py
+++ b/plugins/doc_fragments/onepassword.py
@@ -18,8 +18,8 @@ options:
aliases: ['vault_password']
type: str
section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from
- any section.
+ description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any
+ section.
domain:
description: Domain of 1Password.
default: '1password.com'
@@ -42,7 +42,7 @@ options:
- Only works with 1Password CLI version 2 or later.
type: str
vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults.
type: str
connect_host:
description: The host for 1Password Connect. Must be used in combination with O(connect_token).
@@ -65,10 +65,9 @@ options:
- name: OP_SERVICE_ACCOUNT_TOKEN
version_added: 8.2.0
notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already performed an initial sign
- in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
- is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used
- by C(op).
+ - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in
+ (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
+ is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op).
- This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
- Can target a specific account by providing the O(account_id).
- Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal
diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py
index 3caabe4512..366e3e3e42 100644
--- a/plugins/doc_fragments/oneview.py
+++ b/plugins/doc_fragments/oneview.py
@@ -17,8 +17,8 @@ options:
description:
- Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional
and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration
- will be loaded from environment variables. For links to example configuration files or how to use the environment
- variables verify the notes section.
+ is loaded from environment variables. For links to example configuration files or how to use the environment variables
+ verify the notes section.
type: path
api_version:
description:
@@ -49,16 +49,16 @@ notes:
U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).'
- 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).'
- 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).'
- - 'The OneView API version used will directly affect returned and expected fields in resources. Information on setting the
- desired API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
+ - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired
+ API version can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
"""
VALIDATEETAG = r"""
options:
validate_etag:
description:
- - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag for the
- resource matches the ETag provided in the data.
+ - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource
+ matches the ETag provided in the data.
type: bool
default: true
"""
diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py
index f0e9e87c3d..30b477fbe7 100644
--- a/plugins/doc_fragments/openswitch.py
+++ b/plugins/doc_fragments/openswitch.py
@@ -21,8 +21,8 @@ options:
port:
description:
- Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli)
- or O(transport=rest). The port value will default to the appropriate transport common port if none is provided in
- the task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport.
+ or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the
+ task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport.
type: int
default: 0 (use common port)
username:
@@ -30,25 +30,24 @@ options:
- Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not
affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME)
- will be used instead.
+ is used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote device. This is a common argument used
for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value
- is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) will be used instead.
+ is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands.
- If the timeout is exceeded before the operation is completed, the module will error.
+ If the timeout is exceeded before the operation is completed, the module fails.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli).
- If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used
- instead.
+ If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead.
type: path
transport:
description:
diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py
index f657af407b..08b2948bf3 100644
--- a/plugins/doc_fragments/oracle.py
+++ b/plugins/doc_fragments/oracle.py
@@ -40,9 +40,10 @@ options:
type: str
api_user_key_file:
description:
- - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) variable,
- if any, is used. This option is required if the private key is not specified through a configuration file (See O(config_file_location)).
- If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option must also be provided.
+ - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE)
+ variable, if any, is used. This option is required if the private key is not specified through a configuration file
+ (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option
+ must also be provided.
type: path
api_user_key_pass_phrase:
description:
@@ -53,9 +54,9 @@ options:
auth_type:
description:
- The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is
- performed and the API key (see O(api_user_key_file)) in your config file will be used. If this 'auth_type' module
- option is not specified, the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal)
- to use instance principal based authentication when running ansible playbooks within an OCI compute instance.
+ performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified,
+ the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal
+ based authentication when running ansible playbooks within an OCI compute instance.
choices: ['api_key', 'instance_principal']
default: 'api_key'
type: str
diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py
index be0c931db4..5ccd6525c0 100644
--- a/plugins/doc_fragments/oracle_creatable_resource.py
+++ b/plugins/doc_fragments/oracle_creatable_resource.py
@@ -12,8 +12,8 @@ class ModuleDocFragment(object):
options:
force_create:
description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation,
- and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of the
- resource, even if it already exists. This option is mutually exclusive with O(key_by).
+ and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of
+ the resource, even if it already exists. This option is mutually exclusive with O(key_by).
default: false
type: bool
key_by:
diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py
index b94495d4a1..dde13f6dd3 100644
--- a/plugins/doc_fragments/pipx.py
+++ b/plugins/doc_fragments/pipx.py
@@ -13,26 +13,22 @@ class ModuleDocFragment(object):
options:
global:
description:
- - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope.
- - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option.
- Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version
- or newer.
+ - The module passes the C(--global) argument to C(pipx), to execute actions in global scope.
type: bool
default: false
executable:
description:
- Path to the C(pipx) installed in the system.
- - If not specified, the module will use C(python -m pipx) to run the tool, using the same Python interpreter as ansible
+ - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible
itself.
type: path
+requirements:
+ - This module requires C(pipx) version 1.7.0 or above. Please note that C(pipx) 1.7.0 requires Python 3.8 or above.
notes:
- - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require
- C(pipx>=1.7.0).
- - Please note that C(pipx) requires Python 3.6 or above.
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
- - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed
- using the R(environment Ansible keyword, playbooks_environment).
+ - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using
+ the R(environment Ansible keyword, playbooks_environment).
seealso:
- name: C(pipx) command manual page
description: Manual page for the command.
diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py
deleted file mode 100644
index 4641c36d3e..0000000000
--- a/plugins/doc_fragments/proxmox.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Ansible project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for Proxmox VE modules
- DOCUMENTATION = r"""
-options:
- api_host:
- description:
- - Specify the target host of the Proxmox VE cluster.
- type: str
- required: true
- api_port:
- description:
- - Specify the target port of the Proxmox VE cluster.
- - Uses the E(PROXMOX_PORT) environment variable if not specified.
- type: int
- required: false
- version_added: 9.1.0
- api_user:
- description:
- - Specify the user to authenticate with.
- type: str
- required: true
- api_password:
- description:
- - Specify the password to authenticate with.
- - You can use E(PROXMOX_PASSWORD) environment variable.
- type: str
- api_token_id:
- description:
- - Specify the token ID.
- - Requires C(proxmoxer>=1.1.0) to work.
- type: str
- version_added: 1.3.0
- api_token_secret:
- description:
- - Specify the token secret.
- - Requires C(proxmoxer>=1.1.0) to work.
- type: str
- version_added: 1.3.0
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated.
- - This should only be used on personally controlled sites using self-signed certificates.
- type: bool
- default: false
-requirements: ["proxmoxer", "requests"]
-"""
-
- SELECTION = r"""
-options:
- vmid:
- description:
- - Specifies the instance ID.
- - If not set the next available ID will be fetched from ProxmoxAPI.
- type: int
- node:
- description:
- - Proxmox VE node on which to operate.
- - Only required for O(state=present).
- - For every other states it will be autodiscovered.
- type: str
- pool:
- description:
- - Add the new VM to the specified pool.
- type: str
-"""
-
- ACTIONGROUP_PROXMOX = r"""
-options: {}
-attributes:
- action_group:
- description: Use C(group/community.general.proxmox) in C(module_defaults) to set defaults for this module.
- support: full
- membership:
- - community.general.proxmox
-"""
diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py
index 7c42a4fec2..c2c6c9a262 100644
--- a/plugins/doc_fragments/purestorage.py
+++ b/plugins/doc_fragments/purestorage.py
@@ -10,16 +10,6 @@ __metaclass__ = type
class ModuleDocFragment(object):
- # Standard Pure Storage documentation fragment
- DOCUMENTATION = r"""
-options: {}
-# See separate platform section for more details
-requirements:
- - See separate platform section for more details
-notes:
- - 'Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade.'
-"""
-
# Documentation fragment for FlashBlade
FB = r"""
options:
diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py
new file mode 100644
index 0000000000..a20e064988
--- /dev/null
+++ b/plugins/doc_fragments/redfish.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025 Ansible community
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC
+ DOCUMENTATION = r"""
+options:
+ validate_certs:
+ description:
+ - If V(false), TLS/SSL certificates are not validated.
+ - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path).
+ type: bool
+ default: false
+ ca_path:
+ description:
+ - PEM formatted file that contains a CA certificate to be used for validation.
+ - Only used if O(validate_certs=true).
+ type: path
+ ciphers:
+ required: false
+ description:
+ - TLS/SSL Ciphers to use for the request.
+ - When a list is provided, all ciphers are joined in order with V(:).
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
+ type: list
+ elements: str
+"""
diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py
index 149c018d79..c7bb88b81d 100644
--- a/plugins/doc_fragments/redis.py
+++ b/plugins/doc_fragments/redis.py
@@ -45,7 +45,7 @@ options:
default: true
ca_certs:
description:
- - Path to root certificates file. If not set and O(tls) is set to V(true), certifi ca-certificates will be used.
+ - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used.
type: str
client_cert_file:
description:
diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py
index f6954a1917..32c18e93b8 100644
--- a/plugins/doc_fragments/utm.py
+++ b/plugins/doc_fragments/utm.py
@@ -31,7 +31,8 @@ options:
utm_token:
description:
- The token used to identify at the REST-API.
- - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2.
+ - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter
+ 2.4.2.
type: str
required: true
utm_protocol:
@@ -48,8 +49,8 @@ options:
state:
description:
- The desired state of the object.
- - V(present) will create or update an object.
- - V(absent) will delete an object if it was present.
+ - V(present) creates or updates an object.
+ - V(absent) deletes an object if present.
type: str
choices: [absent, present]
default: present
diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py
index 257a4ccd14..48ff30a276 100644
--- a/plugins/doc_fragments/vexata.py
+++ b/plugins/doc_fragments/vexata.py
@@ -10,15 +10,6 @@ __metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r"""
-options: {}
-# See respective platform section for more details
-requirements:
- - See respective platform section for more details
-notes:
- - Ansible modules are available for Vexata VX100 arrays.
-"""
-
# Documentation fragment for Vexata VX100 series
VX100 = r'''
options:
diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py
index d1377e8964..f4e0946219 100644
--- a/plugins/doc_fragments/xenserver.py
+++ b/plugins/doc_fragments/xenserver.py
@@ -15,28 +15,27 @@ options:
hostname:
description:
- The hostname or IP address of the XenServer host or XenServer pool master.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead.
type: str
default: localhost
aliases: [host, pool]
username:
description:
- The username to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead.
type: str
default: root
aliases: [admin, user]
password:
description:
- The password to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead.
type: str
aliases: [pass, pwd]
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used
- instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead.
type: bool
default: true
"""
diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py
new file mode 100644
index 0000000000..c48afa0467
--- /dev/null
+++ b/plugins/filter/accumulate.py
@@ -0,0 +1,63 @@
+# Copyright (c) Max Gautier
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION = r"""
+name: accumulate
+short_description: Produce a list of accumulated sums of the input list contents
+version_added: 10.1.0
+author: Max Gautier (@VannTen)
+description:
+ - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate).
+ - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list.
+ - Addition means the default Python implementation of C(+) for input list elements type.
+options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+"""
+
+RETURN = r"""
+_value:
+ description: A list of accumulated sums of the elements of the input list.
+ type: list
+ elements: any
+"""
+
+EXAMPLES = r"""
+- name: Enumerate parent directories of some path
+ ansible.builtin.debug:
+ var: >
+ "/some/path/to/my/file"
+ | split('/') | map('split', '/')
+ | community.general.accumulate | map('join', '/')
+ # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file']
+
+- name: Growing string
+ ansible.builtin.debug:
+ var: "'abc' | community.general.accumulate"
+ # Produces ['a', 'ab', 'abc']
+"""
+
+from itertools import accumulate
+from collections.abc import Sequence
+
+from ansible.errors import AnsibleFilterError
+
+
+def list_accumulate(sequence):
+ if not isinstance(sequence, Sequence):
+ raise AnsibleFilterError('Invalid value type (%s) for accumulate (%r)' %
+ (type(sequence), sequence))
+
+ return accumulate(sequence)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'accumulate': list_accumulate,
+ }
diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py
index 1b79294b59..bd4b5d4448 100644
--- a/plugins/filter/counter.py
+++ b/plugins/filter/counter.py
@@ -3,37 +3,37 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: counter
- short_description: Counts hashable elements in a sequence
- version_added: 4.3.0
- author: Rémy Keil (@keilr)
- description:
- - Counts hashable elements in a sequence.
- options:
- _input:
- description: A sequence.
- type: list
- elements: any
- required: true
-'''
+DOCUMENTATION = r"""
+name: counter
+short_description: Counts hashable elements in a sequence
+version_added: 4.3.0
+author: Rémy Keil (@keilr)
+description:
+ - Counts hashable elements in a sequence.
+options:
+ _input:
+ description: A sequence.
+ type: list
+ elements: any
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Count occurrences
ansible.builtin.debug:
msg: >-
{{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }}
# Produces: {1: 1, 'a': 3, 2: 2, 'b': 1}
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as values.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as
+ values.
+ type: dictionary
+"""
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common._collections_compat import Sequence
diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py
index 1f0aa2e9b0..e394d23732 100644
--- a/plugins/filter/crc32.py
+++ b/plugins/filter/crc32.py
@@ -2,8 +2,7 @@
# Copyright (c) 2022, Julien Riou
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common.text.converters import to_bytes
@@ -16,33 +15,33 @@ except ImportError:
HAS_ZLIB = False
-DOCUMENTATION = '''
- name: crc32
- short_description: Generate a CRC32 checksum
- version_added: 5.4.0
- description:
- - Checksum a string using CRC32 algorithm and return its hexadecimal representation.
- options:
- _input:
- description:
- - The string to checksum.
- type: string
- required: true
- author:
- - Julien Riou
-'''
-
-EXAMPLES = '''
- - name: Checksum a test string
- ansible.builtin.debug:
- msg: "{{ 'test' | community.general.crc32 }}"
-'''
-
-RETURN = '''
- _value:
- description: CRC32 checksum.
+DOCUMENTATION = r"""
+name: crc32
+short_description: Generate a CRC32 checksum
+version_added: 5.4.0
+description:
+ - Checksum a string using CRC32 algorithm and return its hexadecimal representation.
+options:
+ _input:
+ description:
+ - The string to checksum.
type: string
-'''
+ required: true
+author:
+ - Julien Riou
+"""
+
+EXAMPLES = r"""
+- name: Checksum a test string
+ ansible.builtin.debug:
+ msg: "{{ 'test' | community.general.crc32 }}"
+"""
+
+RETURN = r"""
+_value:
+ description: CRC32 checksum.
+ type: string
+"""
def crc32s(value):
diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py
index 3e0558bb61..23c977dfd6 100644
--- a/plugins/filter/dict.py
+++ b/plugins/filter/dict.py
@@ -4,25 +4,24 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dict
- short_description: Convert a list of tuples into a dictionary
- version_added: 3.0.0
- author: Felix Fontein (@felixfontein)
- description:
- - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function.
- options:
- _input:
- description: A list of tuples (with exactly two elements).
- type: list
- elements: tuple
- required: true
-'''
+DOCUMENTATION = r"""
+name: dict
+short_description: Convert a list of tuples into a dictionary
+version_added: 3.0.0
+author: Felix Fontein (@felixfontein)
+description:
+ - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function.
+options:
+ _input:
+ description: A list of tuples (with exactly two elements).
+ type: list
+ elements: tuple
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Convert list of tuples into dictionary
ansible.builtin.set_fact:
dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
@@ -53,13 +52,13 @@ EXAMPLES = '''
# "k2": 42,
# "k3": "b"
# }
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary with the provided key-value pairs.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with the provided key-value pairs.
+ type: dictionary
+"""
def dict_filter(sequence):
diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py
index 59595f9573..1d73bde301 100644
--- a/plugins/filter/dict_kv.py
+++ b/plugins/filter/dict_kv.py
@@ -3,40 +3,39 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dict_kv
- short_description: Convert a value to a dictionary with a single key-value pair
- version_added: 1.3.0
- author: Stanislav German-Evtushenko (@giner)
- description:
- - Convert a value to a dictionary with a single key-value pair.
- positional: key
- options:
- _input:
- description: The value for the single key-value pair.
- type: any
- required: true
- key:
- description: The key for the single key-value pair.
- type: any
- required: true
-'''
+DOCUMENTATION = r"""
+name: dict_kv
+short_description: Convert a value to a dictionary with a single key-value pair
+version_added: 1.3.0
+author: Stanislav German-Evtushenko (@giner)
+description:
+ - Convert a value to a dictionary with a single key-value pair.
+positional: key
+options:
+ _input:
+ description: The value for the single key-value pair.
+ type: any
+ required: true
+ key:
+ description: The key for the single key-value pair.
+ type: any
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a one-element dictionary from a value
ansible.builtin.debug:
msg: "{{ 'myvalue' | dict_kv('mykey') }}"
# Produces the dictionary {'mykey': 'myvalue'}
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary with a single key-value pair.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with a single key-value pair.
+ type: dictionary
+"""
def dict_kv(value, key):
diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py
index 310138d496..e9a5d73e53 100644
--- a/plugins/filter/from_csv.py
+++ b/plugins/filter/from_csv.py
@@ -5,54 +5,53 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: from_csv
- short_description: Converts CSV text input into list of dicts
- version_added: 2.3.0
- author: Andrew Pantuso (@Ajpantuso)
- description:
- - Converts CSV text input into list of dictionaries.
- options:
- _input:
- description: A string containing a CSV document.
- type: string
- required: true
- dialect:
- description:
- - The CSV dialect to use when parsing the CSV file.
- - Possible values include V(excel), V(excel-tab) or V(unix).
- type: str
- default: excel
- fieldnames:
- description:
- - A list of field names for every column.
- - This is needed if the CSV does not have a header.
- type: list
- elements: str
- delimiter:
- description:
- - A one-character string used to separate fields.
- - When using this parameter, you change the default value used by O(dialect).
- - The default value depends on the dialect used.
- type: str
- skipinitialspace:
- description:
- - Whether to ignore any whitespaces immediately following the delimiter.
- - When using this parameter, you change the default value used by O(dialect).
- - The default value depends on the dialect used.
- type: bool
- strict:
- description:
- - Whether to raise an exception on bad CSV input.
- - When using this parameter, you change the default value used by O(dialect).
- - The default value depends on the dialect used.
- type: bool
-'''
+DOCUMENTATION = r"""
+name: from_csv
+short_description: Converts CSV text input into list of dicts
+version_added: 2.3.0
+author: Andrew Pantuso (@Ajpantuso)
+description:
+ - Converts CSV text input into list of dictionaries.
+options:
+ _input:
+ description: A string containing a CSV document.
+ type: string
+ required: true
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include V(excel), V(excel-tab) or V(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+ - Whether to ignore any whitespaces immediately following the delimiter.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Parse a CSV file's contents
ansible.builtin.debug:
msg: >-
@@ -71,17 +70,16 @@ EXAMPLES = '''
# "Column 1": "bar",
# "Value": "42",
# }
-'''
+"""
-RETURN = '''
- _value:
- description: A list with one dictionary per row.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A list with one dictionary per row.
+ type: list
+ elements: dictionary
+"""
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
DialectNotAvailableError,
@@ -99,7 +97,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial
try:
dialect = initialize_dialect(dialect, **dialect_params)
except (CustomDialectFailureError, DialectNotAvailableError) as e:
- raise AnsibleFilterError(to_native(e))
+ raise AnsibleFilterError(str(e))
reader = read_csv(data, dialect, fieldnames)
@@ -109,7 +107,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial
for row in reader:
data_list.append(row)
except CSVError as e:
- raise AnsibleFilterError("Unable to process file: %s" % to_native(e))
+ raise AnsibleFilterError(f"Unable to process file: {e}")
return data_list
diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py
index 6fe83875e6..d77338df99 100644
--- a/plugins/filter/from_ini.py
+++ b/plugins/filter/from_ini.py
@@ -4,53 +4,51 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: from_ini
- short_description: Converts INI text input into a dictionary
- version_added: 8.2.0
- author: Steffen Scheib (@sscheib)
- description:
- - Converts INI text input into a dictionary.
- options:
- _input:
- description: A string containing an INI document.
- type: string
- required: true
-'''
+DOCUMENTATION = r"""
+name: from_ini
+short_description: Converts INI text input into a dictionary
+version_added: 8.2.0
+author: Steffen Scheib (@sscheib)
+description:
+ - Converts INI text input into a dictionary.
+options:
+ _input:
+ description: A string containing an INI document.
+ type: string
+ required: true
+"""
-EXAMPLES = r'''
- - name: Slurp an INI file
- ansible.builtin.slurp:
- src: /etc/rhsm/rhsm.conf
- register: rhsm_conf
+EXAMPLES = r"""
+- name: Slurp an INI file
+ ansible.builtin.slurp:
+ src: /etc/rhsm/rhsm.conf
+ register: rhsm_conf
- - name: Display the INI file as dictionary
- ansible.builtin.debug:
- var: rhsm_conf.content | b64decode | community.general.from_ini
+- name: Display the INI file as dictionary
+ ansible.builtin.debug:
+ var: rhsm_conf.content | b64decode | community.general.from_ini
- - name: Set a new dictionary fact with the contents of the INI file
- ansible.builtin.set_fact:
- rhsm_dict: >-
- {{
- rhsm_conf.content | b64decode | community.general.from_ini
- }}
-'''
+- name: Set a new dictionary fact with the contents of the INI file
+ ansible.builtin.set_fact:
+ rhsm_dict: >-
+ {{
+ rhsm_conf.content | b64decode | community.general.from_ini
+ }}
+"""
-RETURN = '''
- _value:
- description: A dictionary representing the INI file.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary representing the INI file.
+ type: dictionary
+"""
-__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils.six.moves.configparser import ConfigParser
-from ansible.module_utils.common.text.converters import to_native
class IniParser(ConfigParser):
@@ -83,8 +81,7 @@ def from_ini(obj):
try:
parser.read_file(StringIO(obj))
except Exception as ex:
- raise AnsibleFilterError(f'from_ini failed to parse given string: '
- f'{to_native(ex)}', orig_exc=ex)
+ raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex)
return parser.as_dict()
diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py
index 8e29c5863c..81a24a1e9f 100644
--- a/plugins/filter/groupby_as_dict.py
+++ b/plugins/filter/groupby_as_dict.py
@@ -3,32 +3,31 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: groupby_as_dict
- short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
- version_added: 3.1.0
- author: Felix Fontein (@felixfontein)
- description:
- - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
- - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries with the same value,
- or when you need a dictionary with list values, or when you need to use deeply nested attributes.
- positional: attribute
- options:
- _input:
- description: A list of dictionaries
- type: list
- elements: dictionary
- required: true
- attribute:
- description: The attribute to use as the key.
- type: str
- required: true
-'''
+DOCUMENTATION = r"""
+name: groupby_as_dict
+short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
+version_added: 3.1.0
+author: Felix Fontein (@felixfontein)
+description:
+ - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
+ - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries
+ with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes.
+positional: attribute
+options:
+ _input:
+ description: A list of dictionaries.
+ type: list
+ elements: dictionary
+ required: true
+ attribute:
+ description: The attribute to use as the key.
+ type: str
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Arrange a list of dictionaries as a dictionary of dictionaries
ansible.builtin.debug:
msg: "{{ sequence | community.general.groupby_as_dict('key') }}"
@@ -46,13 +45,13 @@ EXAMPLES = '''
# other_value:
# key: other_value
# baz: bar
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary containing the dictionaries from the list as values.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary containing the dictionaries from the list as values.
+ type: dictionary
+"""
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common._collections_compat import Mapping, Sequence
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py
index 6f263b137c..76e6aaa3a5 100644
--- a/plugins/filter/hashids.py
+++ b/plugins/filter/hashids.py
@@ -4,8 +4,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
from ansible.errors import (
AnsibleError,
diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py
index 2fe3ef9d73..6a2feb93f0 100644
--- a/plugins/filter/jc.py
+++ b/plugins/filter/jc.py
@@ -5,44 +5,43 @@
#
# contributed by Kelly Brazil
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: jc
- short_description: Convert output of many shell commands and file-types to JSON
- version_added: 1.1.0
- author: Kelly Brazil (@kellyjonbrazil)
- description:
- - Convert output of many shell commands and file-types to JSON.
- - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
- positional: parser
- options:
- _input:
- description: The data to convert.
- type: string
- required: true
- parser:
- description:
- - The correct parser for the input data.
- - For example V(ifconfig).
- - "Note: use underscores instead of dashes (if any) in the parser module name."
- - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
- type: string
- required: true
- quiet:
- description: Set to V(false) to not suppress warnings.
- type: boolean
- default: true
- raw:
- description: Set to V(true) to return pre-processed JSON.
- type: boolean
- default: false
- requirements:
- - jc installed as a Python library (U(https://pypi.org/project/jc/))
-'''
+DOCUMENTATION = r"""
+name: jc
+short_description: Convert output of many shell commands and file-types to JSON
+version_added: 1.1.0
+author: Kelly Brazil (@kellyjonbrazil)
+description:
+ - Convert output of many shell commands and file-types to JSON.
+ - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
+positional: parser
+options:
+ _input:
+ description: The data to convert.
+ type: string
+ required: true
+ parser:
+ description:
+ - The correct parser for the input data.
+ - For example V(ifconfig).
+ - 'Note: use underscores instead of dashes (if any) in the parser module name.'
+ - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
+ type: string
+ required: true
+ quiet:
+ description: Set to V(false) to not suppress warnings.
+ type: boolean
+ default: true
+ raw:
+ description: Set to V(true) to return pre-processed JSON.
+ type: boolean
+ default: false
+requirements:
+ - jc installed as a Python library (U(https://pypi.org/project/jc/))
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller
delegate_to: localhost
ansible.builtin.pip:
@@ -68,13 +67,13 @@ EXAMPLES = '''
# "operating_system": "GNU/Linux",
# "processor": "x86_64"
# }
-'''
+"""
-RETURN = '''
- _value:
- description: The processed output.
- type: any
-'''
+RETURN = r"""
+_value:
+ description: The processed output.
+ type: any
+"""
from ansible.errors import AnsibleError, AnsibleFilterError
import importlib
@@ -144,11 +143,11 @@ def jc_filter(data, parser, quiet=True, raw=False):
# old API (jc v1.17.7 and lower)
else:
- jc_parser = importlib.import_module('jc.parsers.' + parser)
+ jc_parser = importlib.import_module(f'jc.parsers.{parser}')
return jc_parser.parse(data, quiet=quiet, raw=raw)
except Exception as e:
- raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+ raise AnsibleFilterError(f'Error in jc filter plugin: {e}')
class FilterModule(object):
diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml
new file mode 100644
index 0000000000..a370564d7a
--- /dev/null
+++ b/plugins/filter/json_diff.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_diff
+ short_description: Create a JSON patch by comparing two JSON files
+ description:
+ - This filter compares the input with the argument and computes a list of operations
+ that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input
+ to the argument.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: target
+ options:
+ _input:
+ description: A list or a dictionary representing a source JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ target:
+ description: A list or a dictionary representing a target JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A list of JSON patch operations to apply.
+ type: list
+ elements: dict
+
+EXAMPLES: |
+ - name: Compute a difference
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_diff(target) }}"
+ vars:
+ input: {"foo": 1, "bar":{"baz": 2}, "baw": [1, 2, 3], "hello": "day"}
+ target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"}
+ # => [
+ # {"op": "add", "path": "/baq", "value": {"baz": 2}},
+ # {"op": "remove", "path": "/baw/1"},
+ # {"op": "replace", "path": "/hello", "value": "night"}
+ # ]
diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py
new file mode 100644
index 0000000000..4600bfaf92
--- /dev/null
+++ b/plugins/filter/json_patch.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+from json import loads
+from typing import TYPE_CHECKING
+from ansible.errors import AnsibleFilterError
+
+__metaclass__ = type # pylint: disable=C0103
+
+if TYPE_CHECKING:
+ from typing import Any, Callable, Union
+
+try:
+ import jsonpatch
+
+except ImportError as exc:
+ HAS_LIB = False
+ JSONPATCH_IMPORT_ERROR = exc
+else:
+ HAS_LIB = True
+ JSONPATCH_IMPORT_ERROR = None
+
+OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"]
+OPERATIONS_NEEDING_FROM = ["copy", "move"]
+OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"]
+
+
+class FilterModule:
+ """Filter plugin."""
+
+ def check_json_object(self, filter_name: str, object_name: str, inp: Any):
+ if isinstance(inp, (str, bytes, bytearray)):
+ try:
+ return loads(inp)
+ except Exception as e:
+ raise AnsibleFilterError(
+ f"{filter_name}: could not decode JSON from {object_name}: {e}"
+ ) from e
+
+ if not isinstance(inp, (list, dict)):
+ raise AnsibleFilterError(
+ f"{filter_name}: {object_name} is not dictionary, list or string"
+ )
+
+ return inp
+
+ def check_patch_arguments(self, filter_name: str, args: dict):
+
+ if "op" not in args or not isinstance(args["op"], str):
+ raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string")
+
+ if args["op"] not in OPERATIONS_AVAILABLE:
+ raise AnsibleFilterError(
+ f"{filter_name}: unsupported 'op' argument: {args['op']}"
+ )
+
+ if "path" not in args or not isinstance(args["path"], str):
+ raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string")
+
+ if args["op"] in OPERATIONS_NEEDING_FROM:
+ if "from" not in args:
+ raise AnsibleFilterError(
+ f"{filter_name}: 'from' argument missing for '{args['op']}' operation"
+ )
+ if not isinstance(args["from"], str):
+ raise AnsibleFilterError(
+ f"{filter_name}: 'from' argument is not a string"
+ )
+
+ def json_patch(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ op: str,
+ path: str,
+ value: Any = None,
+ **kwargs: dict,
+ ) -> Any:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_patch' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ args = {"op": op, "path": path}
+ from_arg = kwargs.pop("from", None)
+ fail_test = kwargs.pop("fail_test", False)
+
+ if kwargs:
+ raise AnsibleFilterError(
+ f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}"
+ )
+
+ if not isinstance(fail_test, bool):
+ raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+ if op in OPERATIONS_NEEDING_VALUE:
+ args["value"] = value
+ if op in OPERATIONS_NEEDING_FROM and from_arg is not None:
+ args["from"] = from_arg
+
+ inp = self.check_json_object("json_patch", "input", inp)
+ self.check_patch_arguments("json_patch", args)
+
+ result = None
+
+ try:
+ result = jsonpatch.apply_patch(inp, [args])
+ except jsonpatch.JsonPatchTestFailed as e:
+ if fail_test:
+ raise AnsibleFilterError(
+ f"json_patch: test operation failed: {e}"
+ ) from e
+ else:
+ pass
+ except Exception as e:
+ raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e
+
+ return result
+
+ def json_patch_recipe(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ operations: list,
+ /,
+ fail_test: bool = False,
+ ) -> Any:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ if not isinstance(operations, list):
+ raise AnsibleFilterError(
+ "json_patch_recipe: 'operations' needs to be a list"
+ )
+
+ if not isinstance(fail_test, bool):
+ raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+ result = None
+
+ inp = self.check_json_object("json_patch_recipe", "input", inp)
+ for args in operations:
+ self.check_patch_arguments("json_patch_recipe", args)
+
+ try:
+ result = jsonpatch.apply_patch(inp, operations)
+ except jsonpatch.JsonPatchTestFailed as e:
+ if fail_test:
+ raise AnsibleFilterError(
+ f"json_patch_recipe: test operation failed: {e}"
+ ) from e
+ else:
+ pass
+ except Exception as e:
+ raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e
+
+ return result
+
+ def json_diff(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ target: Union[str, list, dict, bytes, bytearray],
+ ) -> list:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_diff' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ inp = self.check_json_object("json_diff", "input", inp)
+ target = self.check_json_object("json_diff", "target", target)
+
+ try:
+ result = list(jsonpatch.make_patch(inp, target))
+ except Exception as e:
+ raise AnsibleFilterError(f"JSON diff failed: {e}") from e
+
+ return result
+
+ def filters(self) -> dict[str, Callable[..., Any]]:
+ """Map filter plugin names to their functions.
+
+ Returns:
+ dict: The filter plugin functions.
+ """
+ return {
+ "json_patch": self.json_patch,
+ "json_patch_recipe": self.json_patch_recipe,
+ "json_diff": self.json_diff,
+ }
diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml
new file mode 100644
index 0000000000..42a0309202
--- /dev/null
+++ b/plugins/filter/json_patch.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_patch
+ short_description: Apply a JSON-Patch (RFC 6902) operation to an object
+ description:
+ - This filter applies a single JSON patch operation and returns a modified object.
+ - If the operation is a test, the filter returns an unmodified object if the test
+ succeeded and a V(none) value otherwise.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: op, path, value
+ options:
+ _input:
+ description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ op:
+ description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
+ type: str
+ choices: [add, copy, move, remove, replace, test]
+ required: true
+ path:
+ description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)).
+ type: str
+ required: true
+ value:
+ description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove).
+ type: raw
+ from:
+ description: The source location for the copy and move operation. Mandatory
+ for O(op=copy) and O(op=move), ignored otherwise.
+ type: str
+ fail_test:
+ description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter
+ invocation will fail with an error.
+ type: bool
+ default: false
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed.
+ type: any
+ returned: always
+
+EXAMPLES: |
+ - name: Insert a new element into an array at a specified index
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}"
+ vars:
+ input: ["foo": { "one": 1 }, "bar": { "two": 2 }]
+ # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}]
+
+ - name: Insert a new key into a dictionary
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}}
+
+ - name: Input is a string
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}"
+ vars:
+ input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }'
+ # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3}
+
+ - name: Existing key is replaced
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": "qux"}
+
+ - name: Escaping tilde as ~0 and slash as ~1 in the path
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}"
+ vars:
+ input: {}
+ # => {"~/": "qux"}
+
+ - name: Add at the end of the array
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/-', 4) }}"
+ vars:
+ input: [1, 2, 3]
+ # => [1, 2, 3, 4]
+
+ - name: Remove a key
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('remove', '/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1} }
+
+ - name: Replace a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": 2}
+
+ - name: Copy a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }}
+
+ - name: Move a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "baz": { "two": 2 }}
+
+ - name: Successful test
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => OK
+
+ - name: Unsuccessful test
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => Failed
diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml
new file mode 100644
index 0000000000..671600b941
--- /dev/null
+++ b/plugins/filter/json_patch_recipe.yml
@@ -0,0 +1,102 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_patch_recipe
+ short_description: Apply JSON-Patch (RFC 6902) operations to an object
+ description:
+ - This filter sequentially applies JSON patch operations and returns a modified object.
+ - If there is a test operation in the list, the filter continues if the test
+ succeeded and returns a V(none) value otherwise.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: operations, fail_test
+ options:
+ _input:
+ description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ operations:
+ description: A list of JSON patch operations to apply.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ op:
+ description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
+ type: str
+ choices: [add, copy, move, remove, replace, test]
+ required: true
+ path:
+ description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)).
+ type: str
+ required: true
+ value:
+ description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove).
+ type: raw
+ from:
+ description: The source location for the copy and move operation. Mandatory
+ for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise.
+ type: str
+ fail_test:
+ description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter
+ invocation will fail with an error.
+ type: bool
+ default: false
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false)
+ and the test failed.
+ type: any
+ returned: always
+
+EXAMPLES: |
+ - name: Apply a series of operations
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch_recipe(operations) }}"
+ vars:
+ input: {}
+ operations:
+ - op: 'add'
+ path: '/foo'
+ value: 1
+ - op: 'add'
+ path: '/bar'
+ value: []
+ - op: 'add'
+ path: '/bar/-'
+ value: 2
+ - op: 'add'
+ path: '/bar/0'
+ value: 1
+ - op: 'remove'
+ path: '/bar/0'
+ - op: 'move'
+ from: '/foo'
+ path: '/baz'
+ - op: 'copy'
+ from: '/baz'
+ path: '/bax'
+ - op: 'copy'
+ from: '/baz'
+ path: '/bay'
+ - op: 'replace'
+ path: '/baz'
+ value: [10, 20, 30]
+ # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]}
diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py
index 9e8fa4ef2e..9c0a83a481 100644
--- a/plugins/filter/json_query.py
+++ b/plugins/filter/json_query.py
@@ -3,32 +3,31 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: json_query
- short_description: Select a single element or a data subset from a complex data structure
- description:
- - This filter lets you query a complex JSON structure and iterate over it using a loop structure.
- positional: expr
- options:
- _input:
- description:
- - The JSON data to query.
- type: any
- required: true
- expr:
- description:
- - The query expression.
- - See U(http://jmespath.org/examples.html) for examples.
- type: string
- required: true
- requirements:
- - jmespath
-'''
+DOCUMENTATION = r"""
+name: json_query
+short_description: Select a single element or a data subset from a complex data structure
+description:
+ - This filter lets you query a complex JSON structure and iterate over it using a loop structure.
+positional: expr
+options:
+ _input:
+ description:
+ - The JSON data to query.
+ type: any
+ required: true
+ expr:
+ description:
+ - The query expression.
+ - See U(http://jmespath.org/examples.html) for examples.
+ type: string
+ required: true
+requirements:
+ - jmespath
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Define data to work on in the examples below
ansible.builtin.set_fact:
domain_definition:
@@ -99,13 +98,13 @@ EXAMPLES = '''
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
vars:
server_name_query: "domain.server[?contains(name,'server1')].port"
-'''
+"""
-RETURN = '''
- _value:
- description: The result of the query.
- type: any
-'''
+RETURN = r"""
+_value:
+ description: The result of the query.
+ type: any
+"""
from ansible.errors import AnsibleError, AnsibleFilterError
@@ -125,10 +124,17 @@ def json_query(data, expr):
'json_query filter')
# Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
- # See issue: https://github.com/ansible-collections/community.general/issues/320
- jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
- jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
- jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+ # See issues https://github.com/ansible-collections/community.general/issues/320
+ # and https://github.com/ansible/ansible/issues/85600.
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + (
+ 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + (
+ 'AnsibleSequence', '_AnsibleLazyTemplateList',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + (
+ 'AnsibleMapping', '_AnsibleLazyTemplateDict',
+ )
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py
index 97b706a950..98b34b4197 100644
--- a/plugins/filter/keep_keys.py
+++ b/plugins/filter/keep_keys.py
@@ -4,102 +4,101 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: keep_keys
- short_description: Keep specific keys from dictionaries in a list
- version_added: "9.1.0"
- author:
- - Vladimir Botka (@vbotka)
- - Felix Fontein (@felixfontein)
- description: This filter keeps only specified keys from a provided list of dictionaries.
- options:
- _input:
- description:
- - A list of dictionaries.
- - Top level keys must be strings.
- type: list
- elements: dictionary
- required: true
- target:
- description:
- - A single key or key pattern to keep, or a list of keys or keys patterns to keep.
- - If O(matching_parameter=regex) there must be exactly one pattern provided.
- type: raw
- required: true
- matching_parameter:
- description: Specify the matching option of target keys.
- type: str
- default: equal
- choices:
- equal: Matches keys of exactly one of the O(target) items.
- starts_with: Matches keys that start with one of the O(target) items.
- ends_with: Matches keys that end with one of the O(target) items.
- regex:
- - Matches keys that match the regular expresion provided in O(target).
- - In this case, O(target) must be a regex string or a list with single regex string.
-'''
+DOCUMENTATION = r"""
+name: keep_keys
+short_description: Keep specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter keeps only specified keys from a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A single key or key pattern to keep, or a list of keys or keys patterns to keep.
+ - If O(matching_parameter=regex) there must be exactly one pattern provided.
+ type: raw
+ required: true
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target) items.
+ starts_with: Matches keys that start with one of the O(target) items.
+ ends_with: Matches keys that end with one of the O(target) items.
+ regex:
+ - Matches keys that match the regular expression provided in O(target).
+ - In this case, O(target) must be a regex string or a list with single regex string.
+"""
-EXAMPLES = '''
- l:
+EXAMPLES = r"""
+- l:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
# 1) By default match keys that equal any of the items in the target.
- t: [k0_x0, k1_x1]
+- t: [k0_x0, k1_x1]
r: "{{ l | community.general.keep_keys(target=t) }}"
# 2) Match keys that start with any of the items in the target.
- t: [k0, k1]
+- t: [k0, k1]
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
# 3) Match keys that end with any of the items in target.
- t: [x0, x1]
+- t: [x0, x1]
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
# 4) Match keys by the regex.
- t: ['^.*[01]_x.*$']
+- t: ['^.*[01]_x.*$']
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
# 5) Match keys by the regex.
- t: '^.*[01]_x.*$'
+- t: '^.*[01]_x.*$'
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
# The results of above examples 1-5 are all the same.
- r:
+- r:
- {k0_x0: A0, k1_x1: B0}
- {k0_x0: A1, k1_x1: B1}
# 6) By default match keys that equal the target.
- t: k0_x0
+- t: k0_x0
r: "{{ l | community.general.keep_keys(target=t) }}"
# 7) Match keys that start with the target.
- t: k0
+- t: k0
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
# 8) Match keys that end with the target.
- t: x0
+- t: x0
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
# 9) Match keys by the regex.
- t: '^.*0_x.*$'
+- t: '^.*0_x.*$'
r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
# The results of above examples 6-9 are all the same.
- r:
+- r:
- {k0_x0: A0}
- {k0_x0: A1}
-'''
+"""
-RETURN = '''
- _value:
- description: The list of dictionaries with selected keys.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: The list of dictionaries with selected keys.
+ type: list
+ elements: dictionary
+"""
from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
_keys_filter_params,
diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py
index d16f955c22..707ec9f1fe 100644
--- a/plugins/filter/lists.py
+++ b/plugins/filter/lists.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common.collections import is_sequence
diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml
index 9806a9f0bc..630e77cf0a 100644
--- a/plugins/filter/lists_difference.yml
+++ b/plugins/filter/lists_difference.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [10]
-
+
- name: Return the difference of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}"
diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml
index 8253463dee..d2ea9483b1 100644
--- a/plugins/filter/lists_intersect.yml
+++ b/plugins/filter/lists_intersect.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [1, 2, 5, 3, 4]
-
+
- name: Return the intersection of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}"
diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py
index 0e47d50172..b15df2e089 100644
--- a/plugins/filter/lists_mergeby.py
+++ b/plugins/filter/lists_mergeby.py
@@ -3,68 +3,61 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: lists_mergeby
- short_description: Merge two or more lists of dictionaries by a given attribute
- version_added: 2.0.0
- author: Vladimir Botka (@vbotka)
- description:
- - Merge two or more lists by attribute O(index). Optional
- parameters O(recursive) and O(list_merge) control the merging of
- the nested dictionaries and lists.
- - The function C(merge_hash) from C(ansible.utils.vars) is used.
- - To learn details on how to use the parameters O(recursive) and
- O(list_merge) see Ansible User's Guide chapter "Using filters to
- manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the
- filter P(ansible.builtin.combine#filter).
+DOCUMENTATION = r"""
+name: lists_mergeby
+short_description: Merge two or more lists of dictionaries by a given attribute
+version_added: 2.0.0
+author: Vladimir Botka (@vbotka)
+description:
+ - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging
+ of the nested dictionaries and lists.
+ - The function C(merge_hash) from C(ansible.utils.vars) is used.
+ - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters
+ to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter).
+positional: another_list, index
+options:
+ _input:
+ description:
+ - A list of dictionaries, or a list of lists of dictionaries.
+ - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries
+ or lists.
+ type: list
+ elements: raw
+ required: true
+ another_list:
+ description:
+ - Another list of dictionaries, or a list of lists of dictionaries.
+ - This parameter can be specified multiple times.
+ type: list
+ elements: raw
+ index:
+ description:
+ - The dictionary key that must be present in every dictionary in every list that is used to merge the lists.
+ type: string
+ required: true
+ recursive:
+ description:
+ - Should the combine recursively merge nested dictionaries (hashes).
+ - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg).
+ type: boolean
+ default: false
+ list_merge:
+ description:
+ - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists.
+ type: string
+ default: replace
+ choices:
+ - replace
+ - keep
+ - append
+ - prepend
+ - append_rp
+ - prepend_rp
+"""
- positional: another_list, index
- options:
- _input:
- description:
- - A list of dictionaries, or a list of lists of dictionaries.
- - The required type of the C(elements) is set to C(raw)
- because all elements of O(_input) can be either dictionaries
- or lists.
- type: list
- elements: raw
- required: true
- another_list:
- description:
- - Another list of dictionaries, or a list of lists of dictionaries.
- - This parameter can be specified multiple times.
- type: list
- elements: raw
- index:
- description:
- - The dictionary key that must be present in every dictionary in every list that is used to
- merge the lists.
- type: string
- required: true
- recursive:
- description:
- - Should the combine recursively merge nested dictionaries (hashes).
- - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)."
- type: boolean
- default: false
- list_merge:
- description:
- - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists.
- type: string
- default: replace
- choices:
- - replace
- - keep
- - append
- - prepend
- - append_rp
- - prepend_rp
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Some results below are manually formatted for better readability. The
# dictionaries' keys will be sorted alphabetically in real output.
@@ -193,14 +186,14 @@ EXAMPLES = '''
# r:
# - {index: a, foo: {x:1, y: 3, z: 4}}
# - {index: b, foo: [Y1, Y2]}
-'''
+"""
-RETURN = '''
- _value:
- description: The merged list.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: The merged list.
+ type: list
+ elements: dictionary
+"""
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six import string_types
diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml
index d985704c2c..abd8caab8a 100644
--- a/plugins/filter/lists_symmetric_difference.yml
+++ b/plugins/filter/lists_symmetric_difference.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [10, 11, 99]
-
+
- name: Return the symmetric difference of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}"
diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml
index ba69090836..8c1ffb4f87 100644
--- a/plugins/filter/lists_union.yml
+++ b/plugins/filter/lists_union.yml
@@ -32,7 +32,7 @@ EXAMPLES: |
list2: [1, 2, 3, 4, 5, 11, 99]
list3: [1, 2, 3, 4, 5, 10, 99, 101]
# => [1, 2, 5, 3, 4, 10, 11, 99, 101]
-
+
- name: Return the union of list1 and list2.
ansible.builtin.debug:
msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}"
diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py
index 662c62b07c..1ece58230c 100644
--- a/plugins/filter/random_mac.py
+++ b/plugins/filter/random_mac.py
@@ -4,28 +4,27 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: random_mac
- short_description: Generate a random MAC address
- description:
- - Generates random networking interfaces MAC addresses for a given prefix.
- options:
- _input:
- description: A string prefix to use as a basis for the random MAC generated.
- type: string
- required: true
- seed:
- description:
- - A randomization seed to initialize the process, used to get repeatable results.
- - If no seed is provided, a system random source such as C(/dev/urandom) is used.
- required: false
- type: string
-'''
+DOCUMENTATION = r"""
+name: random_mac
+short_description: Generate a random MAC address
+description:
+ - Generates random networking interfaces MAC addresses for a given prefix.
+options:
+ _input:
+ description: A string prefix to use as a basis for the random MAC generated.
+ type: string
+ required: true
+ seed:
+ description:
+ - A randomization seed to initialize the process, used to get repeatable results.
+ - If no seed is provided, a system random source such as C(/dev/urandom) is used.
+ required: false
+ type: string
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Random MAC given a prefix
ansible.builtin.debug:
msg: "{{ '52:54:00' | community.general.random_mac }}"
@@ -34,13 +33,13 @@ EXAMPLES = '''
- name: With a seed
ansible.builtin.debug:
msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
-'''
+"""
-RETURN = '''
- _value:
- description: The generated MAC.
- type: string
-'''
+RETURN = r"""
+_value:
+ description: The generated MAC.
+ type: string
+"""
import re
from random import Random, SystemRandom
diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py
index 7a4d912d34..2058803138 100644
--- a/plugins/filter/remove_keys.py
+++ b/plugins/filter/remove_keys.py
@@ -4,102 +4,101 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: remove_keys
- short_description: Remove specific keys from dictionaries in a list
- version_added: "9.1.0"
- author:
- - Vladimir Botka (@vbotka)
- - Felix Fontein (@felixfontein)
- description: This filter removes only specified keys from a provided list of dictionaries.
- options:
- _input:
- description:
- - A list of dictionaries.
- - Top level keys must be strings.
- type: list
- elements: dictionary
- required: true
- target:
- description:
- - A single key or key pattern to remove, or a list of keys or keys patterns to remove.
- - If O(matching_parameter=regex) there must be exactly one pattern provided.
- type: raw
- required: true
- matching_parameter:
- description: Specify the matching option of target keys.
- type: str
- default: equal
- choices:
- equal: Matches keys of exactly one of the O(target) items.
- starts_with: Matches keys that start with one of the O(target) items.
- ends_with: Matches keys that end with one of the O(target) items.
- regex:
- - Matches keys that match the regular expresion provided in O(target).
- - In this case, O(target) must be a regex string or a list with single regex string.
-'''
+DOCUMENTATION = r"""
+name: remove_keys
+short_description: Remove specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter removes only specified keys from a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A single key or key pattern to remove, or a list of keys or keys patterns to remove.
+ - If O(matching_parameter=regex) there must be exactly one pattern provided.
+ type: raw
+ required: true
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target) items.
+ starts_with: Matches keys that start with one of the O(target) items.
+ ends_with: Matches keys that end with one of the O(target) items.
+ regex:
+ - Matches keys that match the regular expression provided in O(target).
+ - In this case, O(target) must be a regex string or a list with single regex string.
+"""
-EXAMPLES = '''
- l:
+EXAMPLES = r"""
+- l:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
# 1) By default match keys that equal any of the items in the target.
- t: [k0_x0, k1_x1]
+- t: [k0_x0, k1_x1]
r: "{{ l | community.general.remove_keys(target=t) }}"
# 2) Match keys that start with any of the items in the target.
- t: [k0, k1]
+- t: [k0, k1]
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
# 3) Match keys that end with any of the items in target.
- t: [x0, x1]
+- t: [x0, x1]
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
# 4) Match keys by the regex.
- t: ['^.*[01]_x.*$']
+- t: ['^.*[01]_x.*$']
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
# 5) Match keys by the regex.
- t: '^.*[01]_x.*$'
+- t: '^.*[01]_x.*$'
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
# The results of above examples 1-5 are all the same.
- r:
+- r:
- {k2_x2: [C0], k3_x3: foo}
- {k2_x2: [C1], k3_x3: bar}
# 6) By default match keys that equal the target.
- t: k0_x0
+- t: k0_x0
r: "{{ l | community.general.remove_keys(target=t) }}"
# 7) Match keys that start with the target.
- t: k0
+- t: k0
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
# 8) Match keys that end with the target.
- t: x0
+- t: x0
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
# 9) Match keys by the regex.
- t: '^.*0_x.*$'
+- t: '^.*0_x.*$'
r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
# The results of above examples 6-9 are all the same.
- r:
+- r:
- {k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k1_x1: B1, k2_x2: [C1], k3_x3: bar}
-'''
+"""
-RETURN = '''
- _value:
- description: The list of dictionaries with selected keys removed.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: The list of dictionaries with selected keys removed.
+ type: list
+ elements: dictionary
+"""
from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
_keys_filter_params,
diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py
index 70b264eba6..69fe02832b 100644
--- a/plugins/filter/replace_keys.py
+++ b/plugins/filter/replace_keys.py
@@ -4,132 +4,131 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: replace_keys
- short_description: Replace specific keys in a list of dictionaries
- version_added: "9.1.0"
- author:
- - Vladimir Botka (@vbotka)
- - Felix Fontein (@felixfontein)
- description: This filter replaces specified keys in a provided list of dictionaries.
- options:
- _input:
+DOCUMENTATION = r"""
+name: replace_keys
+short_description: Replace specific keys in a list of dictionaries
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter replaces specified keys in a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A list of dictionaries with attributes C(before) and C(after).
+ - The value of O(target[].after) replaces key matching O(target[].before).
+ type: list
+ elements: dictionary
+ required: true
+ suboptions:
+ before:
description:
- - A list of dictionaries.
- - Top level keys must be strings.
- type: list
- elements: dictionary
- required: true
- target:
- description:
- - A list of dictionaries with attributes C(before) and C(after).
- - The value of O(target[].after) replaces key matching O(target[].before).
- type: list
- elements: dictionary
- required: true
- suboptions:
- before:
- description:
- - A key or key pattern to change.
- - The interpretation of O(target[].before) depends on O(matching_parameter).
- - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) will be used.
- type: str
- after:
- description: A matching key change to.
- type: str
- matching_parameter:
- description: Specify the matching option of target keys.
+ - A key or key pattern to change.
+ - The interpretation of O(target[].before) depends on O(matching_parameter).
+ - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used.
type: str
- default: equal
- choices:
- equal: Matches keys of exactly one of the O(target[].before) items.
- starts_with: Matches keys that start with one of the O(target[].before) items.
- ends_with: Matches keys that end with one of the O(target[].before) items.
- regex: Matches keys that match one of the regular expressions provided in O(target[].before).
-'''
+ after:
+ description: A matching key change to.
+ type: str
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target[].before) items.
+ starts_with: Matches keys that start with one of the O(target[].before) items.
+ ends_with: Matches keys that end with one of the O(target[].before) items.
+ regex: Matches keys that match one of the regular expressions provided in O(target[].before).
+"""
-EXAMPLES = '''
- l:
+EXAMPLES = r"""
+- l:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
# 1) By default, replace keys that are equal any of the attributes before.
- t:
+- t:
- {before: k0_x0, after: a0}
- {before: k1_x1, after: a1}
r: "{{ l | community.general.replace_keys(target=t) }}"
# 2) Replace keys that starts with any of the attributes before.
- t:
+- t:
- {before: k0, after: a0}
- {before: k1, after: a1}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
# 3) Replace keys that ends with any of the attributes before.
- t:
+- t:
- {before: x0, after: a0}
- {before: x1, after: a1}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}"
# 4) Replace keys that match any regex of the attributes before.
- t:
+- t:
- {before: "^.*0_x.*$", after: a0}
- {before: "^.*1_x.*$", after: a1}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
# The results of above examples 1-4 are all the same.
- r:
+- r:
- {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
- {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
# 5) If more keys match the same attribute before the last one will be used.
- t:
+- t:
- {before: "^.*_x.*$", after: X}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
# gives
- r:
+- r:
- X: foo
- X: bar
# 6) If there are items with equal attribute before the first one will be used.
- t:
+- t:
- {before: "^.*_x.*$", after: X}
- {before: "^.*_x.*$", after: Y}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
# gives
- r:
+- r:
- X: foo
- X: bar
# 7) If there are more matches for a key the first one will be used.
- l:
+- l:
- {aaa1: A, bbb1: B, ccc1: C}
- {aaa2: D, bbb2: E, ccc2: F}
- t:
+- t:
- {before: a, after: X}
- {before: aa, after: Y}
r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
# gives
- r:
+- r:
- {X: A, bbb1: B, ccc1: C}
- {X: D, bbb2: E, ccc2: F}
-'''
+"""
-RETURN = '''
- _value:
- description: The list of dictionaries with replaced keys.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: The list of dictionaries with replaced keys.
+ type: list
+ elements: dictionary
+"""
from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
_keys_filter_params,
diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py
index 0d7ed3e88c..f2f0d6780b 100644
--- a/plugins/filter/reveal_ansible_type.py
+++ b/plugins/filter/reveal_ansible_type.py
@@ -3,25 +3,24 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: reveal_ansible_type
- short_description: Return input type
- version_added: "9.2.0"
- author: Vladimir Botka (@vbotka)
- description: This filter returns input type.
- options:
- _input:
- description: Input data.
- type: raw
- required: true
- alias:
- description: Data type aliases.
- default: {}
- type: dictionary
-'''
+DOCUMENTATION = r"""
+name: reveal_ansible_type
+short_description: Return input type
+version_added: "9.2.0"
+author: Vladimir Botka (@vbotka)
+description: This filter returns input type.
+options:
+ _input:
+ description: Input data.
+ type: raw
+ required: true
+ alias:
+ description: Data type aliases.
+ default: {}
+ type: dictionary
+"""
EXAMPLES = r"""
# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr
@@ -125,11 +124,11 @@ EXAMPLES = r"""
# result => list[bool|dict|float|int|list|str]
"""
-RETURN = '''
- _value:
- description: Type of the data.
- type: str
-'''
+RETURN = r"""
+_value:
+ description: Type of the data.
+ type: str
+"""
from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type
diff --git a/plugins/filter/time.py b/plugins/filter/time.py
index 25970cd260..e8a867a1fe 100644
--- a/plugins/filter/time.py
+++ b/plugins/filter/time.py
@@ -3,8 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
import re
from ansible.errors import AnsibleFilterError
diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py
index bdf2dde270..4be1a684e7 100644
--- a/plugins/filter/to_ini.py
+++ b/plugins/filter/to_ini.py
@@ -4,36 +4,36 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: to_ini
- short_description: Converts a dictionary to the INI file format
- version_added: 8.2.0
- author: Steffen Scheib (@sscheib)
- description:
- - Converts a dictionary to the INI file format.
- options:
- _input:
- description: The dictionary that should be converted to the INI format.
- type: dictionary
- required: true
-'''
+DOCUMENTATION = r"""
+name: to_ini
+short_description: Converts a dictionary to the INI file format
+version_added: 8.2.0
+author: Steffen Scheib (@sscheib)
+description:
+ - Converts a dictionary to the INI file format.
+options:
+ _input:
+ description: The dictionary that should be converted to the INI format.
+ type: dictionary
+ required: true
+"""
-EXAMPLES = r'''
- - name: Define a dictionary
- ansible.builtin.set_fact:
- my_dict:
- section_name:
- key_name: 'key value'
+EXAMPLES = r"""
+- name: Define a dictionary
+ ansible.builtin.set_fact:
+ my_dict:
+ section_name:
+ key_name: 'key value'
- another_section:
- connection: 'ssh'
+ another_section:
+ connection: 'ssh'
- - name: Write dictionary to INI file
- ansible.builtin.copy:
- dest: /tmp/test.ini
- content: '{{ my_dict | community.general.to_ini }}'
+- name: Write dictionary to INI file
+ ansible.builtin.copy:
+ dest: /tmp/test.ini
+ content: '{{ my_dict | community.general.to_ini }}'
# /tmp/test.ini will look like this:
# [section_name]
@@ -41,22 +41,19 @@ EXAMPLES = r'''
#
# [another_section]
# connection = ssh
-'''
+"""
-RETURN = r'''
- _value:
- description: A string formatted as INI file.
- type: string
-'''
+RETURN = r"""
+_value:
+ description: A string formatted as INI file.
+ type: string
+"""
-__metaclass__ = type
-
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils.six.moves.configparser import ConfigParser
-from ansible.module_utils.common.text.converters import to_native
class IniParser(ConfigParser):
@@ -79,7 +76,7 @@ def to_ini(obj):
ini_parser.read_dict(obj)
except Exception as ex:
raise AnsibleFilterError('to_ini failed to parse given dict:'
- f'{to_native(ex)}', orig_exc=ex)
+ f'{ex}', orig_exc=ex)
# catching empty dicts
if obj == dict():
diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py
new file mode 100644
index 0000000000..269ac318ff
--- /dev/null
+++ b/plugins/filter/to_prettytable.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Timur Gadiev
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = r"""
+name: to_prettytable
+short_description: Format a list of dictionaries as an ASCII table
+version_added: "10.7.0"
+author: Timur Gadiev (@tgadiev)
+description:
+ - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library.
+requirements:
+ - prettytable
+options:
+ _input:
+ description: A list of dictionaries to format.
+ type: list
+ elements: dictionary
+ required: true
+ column_order:
+ description: List of column names to specify the order of columns in the table.
+ type: list
+ elements: string
+ header_names:
+ description: List of custom header names to use instead of dictionary keys.
+ type: list
+ elements: string
+ column_alignments:
+ description:
+ - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center),
+ C(right), C(l), C(c), or C(r).
+ - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the
+ right."
+ type: dictionary
+"""
+
+EXAMPLES = r"""
+- name: Set a list of users
+ ansible.builtin.set_fact:
+ users:
+ - name: Alice
+ age: 25
+ role: admin
+ - name: Bob
+ age: 30
+ role: user
+
+- name: Display a list of users as a table
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable
+ }}
+
+- name: Display a table with custom column ordering
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age']
+ )
+ }}
+
+- name: Display a table with selective column output (only show name and role fields)
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['name', 'role']
+ )
+ }}
+
+- name: Display a table with custom headers
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ header_names=['User Name', 'User Age', 'User Role']
+ )
+ }}
+
+- name: Display a table with custom alignments
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+
+- name: Combine multiple options
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age'],
+ header_names=['Position', 'Full Name', 'Years'],
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+"""
+
+RETURN = r"""
+_value:
+ description: The formatted ASCII table.
+ type: string
+"""
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+
+class TypeValidationError(AnsibleFilterError):
+ """Custom exception for type validation errors.
+
+ Args:
+ obj: The object with incorrect type
+ expected: Description of expected type
+ """
+ def __init__(self, obj, expected):
+ type_name = "string" if isinstance(obj, string_types) else type(obj).__name__
+ super().__init__(f"Expected {expected}, got a {type_name}")
+
+
+def _validate_list_param(param, param_name, ensure_strings=True):
+ """Validate a parameter is a list and optionally ensure all elements are strings.
+
+ Args:
+ param: The parameter to validate
+ param_name: The name of the parameter for error messages
+ ensure_strings: Whether to check that all elements are strings
+
+ Raises:
+ AnsibleFilterError: If validation fails
+ """
+ # Map parameter names to their original error message format
+ error_messages = {
+ "column_order": "a list of column names",
+ "header_names": "a list of header names"
+ }
+
+ # Use the specific error message if available, otherwise use a generic one
+ error_msg = error_messages.get(param_name, f"a list for {param_name}")
+
+ if not isinstance(param, list):
+ raise TypeValidationError(param, error_msg)
+
+ if ensure_strings:
+ for item in param:
+ if not isinstance(item, string_types):
+ # Maintain original error message format
+ if param_name == "column_order":
+ error_msg = "a string for column name"
+ elif param_name == "header_names":
+ error_msg = "a string for header name"
+ else:
+ error_msg = f"a string for {param_name} element"
+ raise TypeValidationError(item, error_msg)
+
+
+def _match_key(item_dict, lookup_key):
+ """Find a matching key in a dictionary, handling type conversion.
+
+ Args:
+ item_dict: Dictionary to search in
+ lookup_key: Key to look for, possibly needing type conversion
+
+ Returns:
+ The matching key or None if no match found
+ """
+ # Direct key match
+ if lookup_key in item_dict:
+ return lookup_key
+
+ # Try boolean conversion for 'true'/'false' strings
+ if isinstance(lookup_key, string_types):
+ if lookup_key.lower() == 'true' and True in item_dict:
+ return True
+ if lookup_key.lower() == 'false' and False in item_dict:
+ return False
+
+ # Try numeric conversion for string numbers
+ if lookup_key.isdigit() and int(lookup_key) in item_dict:
+ return int(lookup_key)
+
+ # No match found
+ return None
+
+
+def _build_key_maps(data):
+ """Build mappings between string keys and original keys.
+
+ Args:
+ data: List of dictionaries with keys to map
+
+ Returns:
+ Tuple of (key_map, reverse_key_map)
+ """
+ key_map = {}
+ reverse_key_map = {}
+
+ # Check if the data list is not empty
+ if not data:
+ return key_map, reverse_key_map
+
+ first_dict = data[0]
+ for orig_key in first_dict.keys():
+ # Store string version of the key
+ str_key = to_text(orig_key)
+ key_map[str_key] = orig_key
+ # Also store lowercase version for case-insensitive lookups
+ reverse_key_map[str_key.lower()] = orig_key
+
+ return key_map, reverse_key_map
+
+
+def _configure_alignments(table, field_names, column_alignments):
+ """Configure column alignments for the table.
+
+ Args:
+ table: The PrettyTable instance to configure
+ field_names: List of field names to align
+ column_alignments: Dict of column alignments
+ """
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ if not isinstance(column_alignments, dict):
+ return
+
+ for col_name, alignment in column_alignments.items():
+ if col_name in field_names:
+ # We already validated alignment is a string and a valid value in the main function
+ # Just apply it here
+ alignment = alignment.lower()
+ table.align[col_name] = alignment[0]
+
+
+def to_prettytable(data, *args, **kwargs):
+ """Convert a list of dictionaries to an ASCII table.
+
+ Args:
+ data: List of dictionaries to format
+ *args: Optional list of column names to specify column order
+ **kwargs: Optional keyword arguments:
+ - column_order: List of column names to specify the order
+ - header_names: List of custom header names
+ - column_alignments: Dict of column alignments (left, center, right)
+
+ Returns:
+ String containing the ASCII table
+ """
+ if not HAS_PRETTYTABLE:
+ raise AnsibleFilterError(
+ 'You need to install "prettytable" Python module to use this filter'
+ )
+
+ # === Input validation ===
+ # Validate list type
+ if not isinstance(data, list):
+ raise TypeValidationError(data, "a list of dictionaries")
+
+ # Validate dictionary items if list is not empty
+ if data and not all(isinstance(item, dict) for item in data):
+ invalid_item = next((item for item in data if not isinstance(item, dict)), None)
+ raise TypeValidationError(invalid_item, "all items in the list to be dictionaries")
+
+ # Get sample dictionary to determine fields - empty if no data
+ sample_dict = data[0] if data else {}
+ max_fields = len(sample_dict)
+
+ # === Process column order ===
+ # Handle both positional and keyword column_order
+ column_order = kwargs.pop('column_order', None)
+
+ # Check for conflict between args and column_order
+ if args and column_order is not None:
+ raise AnsibleFilterError("Cannot use both positional arguments and the 'column_order' keyword argument")
+
+ # Use positional args if provided
+ if args:
+ column_order = list(args)
+
+ # Validate column_order
+ if column_order is not None:
+ _validate_list_param(column_order, "column_order")
+
+ # Validate column_order doesn't exceed the number of fields (skip if data is empty)
+ if data and len(column_order) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})")
+
+ # === Process headers ===
+ # Determine field names and ensure they are strings
+ if column_order:
+ field_names = column_order
+ else:
+ # Use field names from first dictionary, ensuring all are strings
+ field_names = [to_text(k) for k in sample_dict]
+
+ # Process custom headers
+ header_names = kwargs.pop('header_names', None)
+ if header_names is not None:
+ _validate_list_param(header_names, "header_names")
+
+ # Validate header_names doesn't exceed the number of fields (skip if data is empty)
+ if data and len(header_names) > max_fields:
+ raise AnsibleFilterError(
+ f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})")
+
+ # Validate that column_order and header_names have the same size if both provided
+ if column_order is not None and len(column_order) != len(header_names):
+ raise AnsibleFilterError(
+ f"'column_order' and 'header_names' must have the same number of elements. "
+ f"Got {len(column_order)} columns and {len(header_names)} headers.")
+
+ # === Process alignments ===
+ # Get column alignments and validate
+ column_alignments = kwargs.pop('column_alignments', {})
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ # Validate column_alignments is a dictionary
+ if not isinstance(column_alignments, dict):
+ raise TypeValidationError(column_alignments, "a dictionary for column_alignments")
+
+ # Validate column_alignments keys and values
+ for key, value in column_alignments.items():
+ # Check that keys are strings
+ if not isinstance(key, string_types):
+ raise TypeValidationError(key, "a string for column_alignments key")
+
+ # Check that values are strings
+ if not isinstance(value, string_types):
+ raise TypeValidationError(value, "a string for column_alignments value")
+
+ # Check that values are valid alignments
+ if value.lower() not in valid_alignments:
+ raise AnsibleFilterError(
+ f"Invalid alignment '{value}' in 'column_alignments'. "
+ f"Valid alignments are: {', '.join(sorted(valid_alignments))}")
+
+ # Validate column_alignments doesn't have more keys than fields (skip if data is empty)
+ if data and len(column_alignments) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})")
+
+ # Check for unknown parameters
+ if kwargs:
+ raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}")
+
+ # === Build the table ===
+ table = prettytable.PrettyTable()
+
+ # Set the field names for display
+ display_names = header_names if header_names is not None else field_names
+ table.field_names = [to_text(name) for name in display_names]
+
+ # Configure alignments after setting field_names
+ _configure_alignments(table, display_names, column_alignments)
+
+ # Build key maps only if not using explicit column_order and we have data
+ key_map = {}
+ reverse_key_map = {}
+ if not column_order and data: # Only needed when using original dictionary keys and we have data
+ key_map, reverse_key_map = _build_key_maps(data)
+
+ # If we have an empty list with no custom parameters, return a simple empty table
+ if not data and not column_order and not header_names and not column_alignments:
+ return "++\n++"
+
+ # Process each row if we have data
+ for item in data:
+ row = []
+ for col in field_names:
+ # Try direct mapping first
+ if col in key_map:
+ row.append(item.get(key_map[col], ""))
+ else:
+ # Try to find a matching key in the item
+ matched_key = _match_key(item, col)
+ if matched_key is not None:
+ row.append(item.get(matched_key, ""))
+ else:
+ # Try case-insensitive lookup as last resort
+ lower_col = col.lower() if isinstance(col, string_types) else str(col).lower()
+ if lower_col in reverse_key_map:
+ row.append(item.get(reverse_key_map[lower_col], ""))
+ else:
+ # No match found
+ row.append("")
+ table.add_row(row)
+
+ return to_text(table)
+
+
+class FilterModule(object):
+ """Ansible core jinja2 filters."""
+
+ def filters(self):
+ return {
+ 'to_prettytable': to_prettytable
+ }
diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py
index 7bdf0d0ab4..aed7979de8 100644
--- a/plugins/filter/unicode_normalize.py
+++ b/plugins/filter/unicode_normalize.py
@@ -4,48 +4,47 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: unicode_normalize
- short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
- version_added: 3.7.0
- author: Andrew Pantuso (@Ajpantuso)
- description:
- - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
- positional: form
- options:
- _input:
- description: A unicode string.
- type: string
- required: true
- form:
- description:
- - The normal form to use.
- - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
- type: string
- default: NFC
- choices:
- - NFC
- - NFD
- - NFKC
- - NFKD
-'''
+DOCUMENTATION = r"""
+name: unicode_normalize
+short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
+version_added: 3.7.0
+author: Andrew Pantuso (@Ajpantuso)
+description:
+ - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
+positional: form
+options:
+ _input:
+ description: A unicode string.
+ type: string
+ required: true
+ form:
+ description:
+ - The normal form to use.
+ - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
+ type: string
+ default: NFC
+ choices:
+ - NFC
+ - NFD
+ - NFKC
+ - NFKD
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Normalize unicode string
ansible.builtin.set_fact:
dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}"
# The resulting string has length 2: one letter is 'a', the other
# the diacritic combiner.
-'''
+"""
-RETURN = '''
- _value:
- description: The normalized unicode string of the specified normal form.
- type: string
-'''
+RETURN = r"""
+_value:
+ description: The normalized unicode string of the specified normal form.
+ type: string
+"""
from unicodedata import normalize
diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py
index 09eedbf563..f3fb30035a 100644
--- a/plugins/filter/version_sort.py
+++ b/plugins/filter/version_sort.py
@@ -3,37 +3,36 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: version_sort
- short_description: Sort a list according to version order instead of pure alphabetical one
- version_added: 2.2.0
- author: Eric L. (@ericzolf)
- description:
- - Sort a list according to version order instead of pure alphabetical one.
- options:
- _input:
- description: A list of strings to sort.
- type: list
- elements: string
- required: true
-'''
+DOCUMENTATION = r"""
+name: version_sort
+short_description: Sort a list according to version order instead of pure alphabetical one
+version_added: 2.2.0
+author: Eric L. (@ericzolf)
+description:
+ - Sort a list according to version order instead of pure alphabetical one.
+options:
+ _input:
+ description: A list of strings to sort.
+ type: list
+ elements: string
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Convert list of tuples into dictionary
ansible.builtin.set_fact:
dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}"
# Result is ['2.1', '2.9', '2.10']
-'''
+"""
-RETURN = '''
- _value:
- description: The list of strings sorted by version.
- type: list
- elements: string
-'''
+RETURN = r"""
+_value:
+ description: The list of strings sorted by version.
+ type: list
+ elements: string
+"""
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py
index 664380da8f..677e1a3ad5 100644
--- a/plugins/inventory/cobbler.py
+++ b/plugins/inventory/cobbler.py
@@ -3,122 +3,137 @@
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Orion Poplawski (@opoplawski)
- name: cobbler
- short_description: Cobbler inventory source
- version_added: 1.0.0
+DOCUMENTATION = r"""
+author: Orion Poplawski (@opoplawski)
+name: cobbler
+short_description: Cobbler inventory source
+version_added: 1.0.0
+description:
+ - Get inventory hosts from the cobbler service.
+ - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin:
+ cobbler) entry.'
+ - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler.
+    The primary IP address is defined as the management interface if defined, or the interface whose DNS name matches the
+ hostname of the system, or else the first interface found.
+extends_documentation_fragment:
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize
+ it as its own.
+ type: string
+ required: true
+ choices: ['cobbler', 'community.general.cobbler']
+ url:
+ description: URL to cobbler.
+ type: string
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_USER
+ password:
+ description: Cobbler authentication password.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails.
+ type: boolean
+ default: false
+ connection_timeout:
+ description: Timeout to connect to cobbler server.
+ type: int
+ required: false
+ version_added: 10.7.0
+ exclude_mgmt_classes:
+ description: Management classes to exclude from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ exclude_profiles:
description:
- - Get inventory hosts from the cobbler service.
- - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: cobbler) entry."
- - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. The primary IP address is
- defined as the management interface if defined, or the interface who's DNS name matches the hostname of the system, or else the first interface found.
- extends_documentation_fragment:
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize it as it's own.
- type: string
- required: true
- choices: [ 'cobbler', 'community.general.cobbler' ]
- url:
- description: URL to cobbler.
- type: string
- default: 'http://cobbler/cobbler_api'
- env:
- - name: COBBLER_SERVER
- user:
- description: Cobbler authentication user.
- type: string
- required: false
- env:
- - name: COBBLER_USER
- password:
- description: Cobbler authentication password.
- type: string
- required: false
- env:
- - name: COBBLER_PASSWORD
- cache_fallback:
- description: Fallback to cached results if connection to cobbler fails.
- type: boolean
- default: false
- exclude_mgmt_classes:
- description: Management classes to exclude from inventory.
- type: list
- default: []
- elements: str
- version_added: 7.4.0
- exclude_profiles:
- description:
- - Profiles to exclude from inventory.
- - Ignored if O(include_profiles) is specified.
- type: list
- default: []
- elements: str
- include_mgmt_classes:
- description: Management classes to include from inventory.
- type: list
- default: []
- elements: str
- version_added: 7.4.0
- include_profiles:
- description:
- - Profiles to include from inventory.
- - If specified, all other profiles will be excluded.
- - O(exclude_profiles) is ignored if O(include_profiles) is specified.
- type: list
- default: []
- elements: str
- version_added: 4.4.0
- inventory_hostname:
- description:
- - What to use for the ansible inventory hostname.
- - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static interface.
- - If set to V(system), the cobbler system name is used.
- type: str
- choices: [ 'hostname', 'system' ]
- default: hostname
- version_added: 7.1.0
- group_by:
- description: Keys to group hosts by.
- type: list
- elements: string
- default: [ 'mgmt_classes', 'owners', 'status' ]
- group:
- description: Group to place all hosts into.
- default: cobbler
- group_prefix:
- description: Prefix to apply to cobbler groups.
- default: cobbler_
- want_facts:
- description: Toggle, if V(true) the plugin will retrieve host facts from the server.
- type: boolean
- default: true
- want_ip_addresses:
- description:
- - Toggle, if V(true) the plugin will add a C(cobbler_ipv4_addresses) and C(cobbleer_ipv6_addresses) dictionary to the defined O(group) mapping
- interface DNS names to IP addresses.
- type: boolean
- default: true
- version_added: 7.1.0
-'''
+ - Profiles to exclude from inventory.
+ - Ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ include_mgmt_classes:
+ description: Management classes to include from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ include_profiles:
+ description:
+ - Profiles to include from inventory.
+ - If specified, all other profiles are excluded.
+ - O(exclude_profiles) is ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ version_added: 4.4.0
+ inventory_hostname:
+ description:
+ - What to use for the ansible inventory hostname.
+ - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static
+ interface.
+ - If set to V(system), the cobbler system name is used.
+ type: str
+ choices: ['hostname', 'system']
+ default: hostname
+ version_added: 7.1.0
+ group_by:
+ description: Keys to group hosts by.
+ type: list
+ elements: string
+ default: ['mgmt_classes', 'owners', 'status']
+ group:
+ description: Group to place all hosts into.
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups.
+ default: cobbler_
+ want_facts:
+ description: Toggle, if V(true) the plugin retrieves all host facts from the server.
+ type: boolean
+ default: true
+ want_ip_addresses:
+ description:
+ - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the
+ defined O(group) mapping interface DNS names to IP addresses.
+ type: boolean
+ default: true
+ version_added: 7.1.0
+ facts_level:
+ description:
+ - Set to V(normal) to gather only system-level variables.
+ - Set to V(as_rendered) to gather all variables as rolled up by Cobbler.
+ type: string
+ choices: ['normal', 'as_rendered']
+ default: normal
+ version_added: 10.7.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# my.cobbler.yml
plugin: community.general.cobbler
url: http://cobbler/cobbler_api
user: ansible-tester
password: secure
-'''
+"""
import socket
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
from ansible.module_utils.six import text_type
@@ -136,6 +151,18 @@ except ImportError:
HAS_XMLRPC_CLIENT = False
+class TimeoutTransport (xmlrpc_client.SafeTransport):
+ def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+ super(TimeoutTransport, self).__init__()
+ self._timeout = timeout
+ self.context = None
+
+ def make_connection(self, host):
+ conn = xmlrpc_client.SafeTransport.make_connection(self, host)
+ conn.timeout = self._timeout
+ return conn
+
+
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using cobbler as source. '''
@@ -144,7 +171,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def __init__(self):
super(InventoryModule, self).__init__()
self.cache_key = None
- self.connection = None
+
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
def verify_file(self, path):
valid = False
@@ -155,18 +184,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
return valid
- def _get_connection(self):
- if not HAS_XMLRPC_CLIENT:
- raise AnsibleError('Could not import xmlrpc client library')
-
- if self.connection is None:
- self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
- self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
- self.token = None
- if self.get_option('user') is not None:
- self.token = self.connection.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
- return self.connection
-
def _init_cache(self):
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
@@ -180,12 +197,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_profiles(self):
if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_profiles(self.token)
+ data = self.cobbler.get_profiles(self.token)
else:
- data = c.get_profiles()
+ data = self.cobbler.get_profiles()
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -196,12 +212,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_systems(self):
if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_systems(self.token)
+ data = self.cobbler.get_systems(self.token)
else:
- data = c.get_systems()
+ data = self.cobbler.get_systems()
+
+ # If more facts are requested, gather them all from Cobbler
+ if self.facts_level == "as_rendered":
+ for i, host in enumerate(data):
+ self.display.vvvv(f"Gathering all facts for {host['name']}\n")
+ if self.token is not None:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token)
+ else:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'])
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -211,7 +235,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
return self._cache[self.cache_key]['systems']
def _add_safe_group_name(self, group, child=None):
- group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+ group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}"))
if child is not None:
self.inventory.add_child(group_name, child)
return group_name
@@ -231,6 +255,17 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
# get connection host
self.cobbler_url = self.get_option('url')
+ self.display.vvvv(f'Connecting to {self.cobbler_url}\n')
+
+ if 'connection_timeout' in self._options:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True,
+ transport=TimeoutTransport(timeout=self.get_option('connection_timeout')))
+ else:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.cobbler.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
+
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
@@ -240,19 +275,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.include_profiles = self.get_option('include_profiles')
self.group_by = self.get_option('group_by')
self.inventory_hostname = self.get_option('inventory_hostname')
+ self.facts_level = self.get_option('facts_level')
for profile in self._get_profiles():
if profile['parent']:
- self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
+ self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n")
if not self._exclude_profile(profile['parent']):
parent_group_name = self._add_safe_group_name(profile['parent'])
- self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
+ self.display.vvvv(f'Added profile parent group {parent_group_name}\n')
if not self._exclude_profile(profile['name']):
group_name = self._add_safe_group_name(profile['name'])
- self.display.vvvv('Added profile group %s\n' % group_name)
+ self.display.vvvv(f'Added profile group {group_name}\n')
self.inventory.add_child(parent_group_name, group_name)
else:
- self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
+ self.display.vvvv(f"Processing profile {profile['name']} without parent\n")
# Create a hierarchy of profile names
profile_elements = profile['name'].split('-')
i = 0
@@ -260,12 +296,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
profile_group = '-'.join(profile_elements[0:i + 1])
profile_group_child = '-'.join(profile_elements[0:i + 2])
if self._exclude_profile(profile_group):
- self.display.vvvv('Excluding profile %s\n' % profile_group)
+ self.display.vvvv(f'Excluding profile {profile_group}\n')
break
group_name = self._add_safe_group_name(profile_group)
- self.display.vvvv('Added profile group %s\n' % group_name)
+ self.display.vvvv(f'Added profile group {group_name}\n')
child_group_name = self._add_safe_group_name(profile_group_child)
- self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+ self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n')
self.inventory.add_child(group_name, child_group_name)
i = i + 1
@@ -273,7 +309,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.group = to_safe_group_name(self.get_option('group'))
if self.group is not None and self.group != '':
self.inventory.add_group(self.group)
- self.display.vvvv('Added site group %s\n' % self.group)
+ self.display.vvvv(f'Added site group {self.group}\n')
ip_addresses = {}
ipv6_addresses = {}
@@ -286,14 +322,14 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
interfaces = host['interfaces']
if set(host['mgmt_classes']) & set(self.include_mgmt_classes):
- self.display.vvvv('Including host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
+ self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
else:
if self._exclude_profile(host['profile']):
- self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
+ self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n")
continue
if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes):
- self.display.vvvv('Excluding host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
+ self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
continue
# hostname is often empty for non-static IP hosts
@@ -303,31 +339,31 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
this_dns_name = ivalue.get('dns_name', None)
if this_dns_name is not None and this_dns_name != "":
hostname = make_unsafe(this_dns_name)
- self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+ self.display.vvvv(f'Set hostname to {hostname} from {iname}\n')
if hostname == '':
- self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+ self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n")
continue
self.inventory.add_host(hostname)
- self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+ self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n")
# Add host to profile group
if host['profile'] != '':
group_name = self._add_safe_group_name(host['profile'], child=hostname)
- self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+ self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n')
else:
- self.display.warning('Host %s has an empty profile\n' % (hostname))
+ self.display.warning(f'Host {hostname} has an empty profile\n')
# Add host to groups specified by group_by fields
for group_by in self.group_by:
- if host[group_by] == '<>':
+ if host[group_by] == '<>' or host[group_by] == '':
groups = []
else:
groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
for group in groups:
group_name = self._add_safe_group_name(group, child=hostname)
- self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+ self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n')
# Add to group for this inventory
if self.group is not None:
@@ -377,7 +413,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
try:
self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host))
except ValueError as e:
- self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
+ self.display.warning(f"Could not set host info for {hostname}: {e}")
if self.get_option('want_ip_addresses'):
self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses))
diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py
index bd29e8d310..7a10b553a2 100644
--- a/plugins/inventory/gitlab_runners.py
+++ b/plugins/inventory/gitlab_runners.py
@@ -4,64 +4,65 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = '''
- name: gitlab_runners
- author:
- - Stefan Heitmüller (@morph027)
- short_description: Ansible dynamic inventory plugin for GitLab runners.
- requirements:
- - python-gitlab > 1.8.0
- extends_documentation_fragment:
- - constructed
- description:
- - Reads inventories from the GitLab API.
- - Uses a YAML configuration file gitlab_runners.[yml|yaml].
- options:
- plugin:
- description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as it's own.
- type: str
- required: true
- choices:
- - gitlab_runners
- - community.general.gitlab_runners
- server_url:
- description: The URL of the GitLab server, with protocol (i.e. http or https).
- env:
- - name: GITLAB_SERVER_URL
- version_added: 1.0.0
- type: str
- required: true
- api_token:
- description: GitLab token for logging in.
- env:
- - name: GITLAB_API_TOKEN
- version_added: 1.0.0
- type: str
- aliases:
- - private_token
- - access_token
- filter:
- description: filter runners from GitLab API
- env:
- - name: GITLAB_FILTER
- version_added: 1.0.0
- type: str
- choices: ['active', 'paused', 'online', 'specific', 'shared']
- verbose_output:
- description: Toggle to (not) include all available nodes metadata
- type: bool
- default: true
-'''
+DOCUMENTATION = r"""
+name: gitlab_runners
+author:
+ - Stefan Heitmüller (@morph027)
+short_description: Ansible dynamic inventory plugin for GitLab runners
+requirements:
+ - python-gitlab > 1.8.0
+extends_documentation_fragment:
+ - constructed
+description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+    description: The URL of the GitLab server, with protocol (for example, http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: Filter runners from GitLab API.
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata.
+ type: bool
+ default: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# gitlab_runners.yml
plugin: community.general.gitlab_runners
host: https://gitlab.com
+---
# Example using constructed features to create groups and set ansible_host
plugin: community.general.gitlab_runners
host: https://gitlab.com
@@ -78,10 +79,9 @@ keyed_groups:
# hint: labels containing special characters will be converted to safe names
- key: 'tag_list'
prefix: tag
-'''
+"""
from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
@@ -124,7 +124,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
except Exception as e:
- raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+ raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}')
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py
index d1f2bc617f..64d77b437d 100644
--- a/plugins/inventory/icinga2.py
+++ b/plugins/inventory/icinga2.py
@@ -3,75 +3,73 @@
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = '''
- name: icinga2
- short_description: Icinga2 inventory source
- version_added: 3.7.0
- author:
- - Cliff Hults (@BongoEADGC6)
+DOCUMENTATION = r"""
+name: icinga2
+short_description: Icinga2 inventory source
+version_added: 3.7.0
+author:
+ - Cliff Hults (@BongoEADGC6)
+description:
+ - Get inventory hosts from the Icinga2 API.
+ - Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml).
+extends_documentation_fragment:
+ - constructed
+options:
+ strict:
+ version_added: 4.4.0
+ compose:
+ version_added: 4.4.0
+ groups:
+ version_added: 4.4.0
+ keyed_groups:
+ version_added: 4.4.0
+ plugin:
+ description: Name of the plugin.
+ required: true
+ type: string
+ choices: ['community.general.icinga2']
+ url:
+ description: Root URL of Icinga2 API.
+ type: string
+ required: true
+ user:
+ description: Username to query the API.
+ type: string
+ required: true
+ password:
+ description: Password to query the API.
+ type: string
+ required: true
+ host_filter:
description:
- - Get inventory hosts from the Icinga2 API.
- - "Uses a configuration file as an inventory source, it must end in
- C(.icinga2.yml) or C(.icinga2.yaml)."
- extends_documentation_fragment:
- - constructed
- options:
- strict:
- version_added: 4.4.0
- compose:
- version_added: 4.4.0
- groups:
- version_added: 4.4.0
- keyed_groups:
- version_added: 4.4.0
- plugin:
- description: Name of the plugin.
- required: true
- type: string
- choices: ['community.general.icinga2']
- url:
- description: Root URL of Icinga2 API.
- type: string
- required: true
- user:
- description: Username to query the API.
- type: string
- required: true
- password:
- description: Password to query the API.
- type: string
- required: true
- host_filter:
- description:
- - An Icinga2 API valid host filter. Leave blank for no filtering
- type: string
- required: false
- validate_certs:
- description: Enables or disables SSL certificate verification.
- type: boolean
- default: true
- inventory_attr:
- description:
- - Allows the override of the inventory name based on different attributes.
- - This allows for changing the way limits are used.
- - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead.
- type: string
- default: address
- choices: ['name', 'display_name', 'address']
- version_added: 4.2.0
- group_by_hostgroups:
- description:
- - Uses Icinga2 hostgroups as groups.
- type: boolean
- default: true
- version_added: 8.4.0
-'''
+ - An Icinga2 API valid host filter. Leave blank for no filtering.
+ type: string
+ required: false
+ validate_certs:
+ description: Enables or disables SSL certificate verification.
+ type: boolean
+ default: true
+ inventory_attr:
+ description:
+ - Allows the override of the inventory name based on different attributes.
+ - This allows for changing the way limits are used.
+ - The current default, V(address), is sometimes not unique or present. We recommend using V(name) instead.
+ type: string
+ default: address
+ choices: ['name', 'display_name', 'address']
+ version_added: 4.2.0
+ group_by_hostgroups:
+ description:
+ - Uses Icinga2 hostgroups as groups.
+ type: boolean
+ default: true
+ version_added: 8.4.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# my.icinga2.yml
plugin: community.general.icinga2
url: http://localhost:5665
@@ -94,7 +92,7 @@ compose:
# set 'ansible_user' and 'ansible_port' from icinga2 host vars
ansible_user: icinga2_attributes.vars.ansible_user
ansible_port: icinga2_attributes.vars.ansible_port | default(22)
-'''
+"""
import json
@@ -141,7 +139,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
'User-Agent': "ansible-icinga2-inv",
'Accept': "application/json",
}
- api_status_url = self.icinga2_url + "/status"
+ api_status_url = f"{self.icinga2_url}/status"
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
@@ -151,7 +149,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
open_url(api_status_url, **request_args)
def _post_request(self, request_url, data=None):
- self.display.vvv("Requested URL: %s" % request_url)
+ self.display.vvv(f"Requested URL: {request_url}")
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
@@ -160,42 +158,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
}
if data is not None:
request_args['data'] = json.dumps(data)
- self.display.vvv("Request Args: %s" % request_args)
+ self.display.vvv(f"Request Args: {request_args}")
try:
response = open_url(request_url, **request_args)
except HTTPError as e:
try:
error_body = json.loads(e.read().decode())
- self.display.vvv("Error returned: {0}".format(error_body))
+ self.display.vvv(f"Error returned: {error_body}")
except Exception:
error_body = {"status": None}
if e.code == 404 and error_body.get('status') == "No objects found.":
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
- raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
+ raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}")
response_body = response.read()
json_data = json.loads(response_body.decode('utf-8'))
- self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
+ self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}")
if 200 <= response.status <= 299:
return json_data
if response.status == 404 and json_data['status'] == "No objects found.":
raise AnsibleParserError(
- "API returned no data -- Response: %s - %s"
- % (response.status, json_data['status']))
+ f"API returned no data -- Response: {response.status} - {json_data['status']}")
if response.status == 401:
raise AnsibleParserError(
- "API was unable to complete query -- Response: %s - %s"
- % (response.status, json_data['status']))
+ f"API was unable to complete query -- Response: {response.status} - {json_data['status']}")
if response.status == 500:
raise AnsibleParserError(
- "API Response - %s - %s"
- % (json_data['status'], json_data['errors']))
+ f"API Response - {json_data['status']} - {json_data['errors']}")
raise AnsibleParserError(
- "Unexpected data returned - %s - %s"
- % (json_data['status'], json_data['errors']))
+ f"Unexpected data returned - {json_data['status']} - {json_data['errors']}")
def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
- query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
+ query_hosts_url = f"{self.icinga2_url}/objects/hosts"
self.headers['X-HTTP-Method-Override'] = 'GET'
data_dict = dict()
if hosts:
@@ -296,13 +290,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self.group_by_hostgroups = self.get_option('group_by_hostgroups')
if self.templar.is_template(self.icinga2_url):
- self.icinga2_url = self.templar.template(variable=self.icinga2_url, disable_lookups=False)
+ self.icinga2_url = self.templar.template(variable=self.icinga2_url)
if self.templar.is_template(self.icinga2_user):
- self.icinga2_user = self.templar.template(variable=self.icinga2_user, disable_lookups=False)
+ self.icinga2_user = self.templar.template(variable=self.icinga2_user)
if self.templar.is_template(self.icinga2_password):
- self.icinga2_password = self.templar.template(variable=self.icinga2_password, disable_lookups=False)
+ self.icinga2_password = self.templar.template(variable=self.icinga2_password)
- self.icinga2_url = self.icinga2_url.rstrip('/') + '/v1'
+ self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1"
# Not currently enabled
# self.cache_key = self.get_cache_key(path)
diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py
new file mode 100644
index 0000000000..603003d617
--- /dev/null
+++ b/plugins/inventory/iocage.py
@@ -0,0 +1,419 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Vladimir Botka
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: iocage
+short_description: C(iocage) inventory source
+version_added: 10.2.0
+author:
+ - Vladimir Botka (@vbotka)
+requirements:
+ - iocage >= 1.8
+description:
+ - Get inventory hosts from the C(iocage) jail manager running on O(host).
+ - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the
+ controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list).
+ - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml).
+extends_documentation_fragment:
+ - ansible.builtin.constructed
+ - ansible.builtin.inventory_cache
+options:
+ plugin:
+ description:
+ - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as
+ its own.
+ required: true
+ choices: ['community.general.iocage']
+ type: str
+ host:
+ description: The IP/hostname of the C(iocage) host.
+ type: str
+ default: localhost
+ user:
+ description:
+ - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command
+ C(iocage list). This option is not required if O(host=localhost).
+ type: str
+ sudo:
+ description:
+ - Enable execution as root.
+ - This requires passwordless sudo of the command C(iocage list*).
+ type: bool
+ default: false
+ version_added: 10.3.0
+ sudo_preserve_env:
+ description:
+ - Preserve environment if O(sudo) is enabled.
+ - This requires C(SETENV) sudoers tag.
+ type: bool
+ default: false
+ version_added: 10.3.0
+ get_properties:
+ description:
+ - Get jails' properties. Creates dictionary C(iocage_properties) for each added host.
+ type: bool
+ default: false
+ env:
+ description:
+ - O(user)'s environment on O(host).
+ - Enable O(sudo_preserve_env) if O(sudo) is enabled.
+ type: dict
+ default: {}
+ hooks_results:
+ description:
+ - List of paths to the files in a jail.
+ - Content of the files is stored in the items of the list C(iocage_hooks).
+ - If a file is not available the item keeps the dash character C(-).
+ - The variable C(iocage_hooks) is not created if O(hooks_results) is empty.
+ type: list
+ elements: path
+ version_added: 10.4.0
+ inventory_hostname_tag:
+ description:
+ - The name of the tag in the C(iocage properties notes) that contains the jail's alias.
+ - By default, the C(iocage list -l) column C(NAME) is used to name the jail.
+ - This option requires the notes format C("t1=v1 t2=v2 ...").
+ - The option O(get_properties) must be enabled.
+ type: str
+ version_added: 11.0.0
+ inventory_hostname_required:
+ description:
+ - If enabled, the tag declared in O(inventory_hostname_tag) is required.
+ type: bool
+ default: false
+ version_added: 11.0.0
+notes:
+ - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin
+ with O(user) specified and with O(host) other than V(localhost).
+ - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l).
+ - This inventory plugin creates variables C(iocage_*) for each added host.
+ - The values of these variables are collected from the output of the command C(iocage list -l).
+ - The names of these variables correspond to the output columns.
+ - The column C(NAME) is used to name the added host.
+ - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate
+ the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails/<name>/root).
+ If you mount the C(poolname) to a different path the easiest remedy is to create a symlink.
+"""
+
+EXAMPLES = r"""
+---
+# file name must end with iocage.yaml or iocage.yml
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+
+---
+# user is not required if iocage is running on localhost (default)
+plugin: community.general.iocage
+
+---
+# run cryptography without legacy algorithms
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+
+---
+# execute as root
+# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*'
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+sudo: true
+sudo_preserve_env: true
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+
+---
+# enable cache
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+cache: true
+
+---
+# see inventory plugin ansible.builtin.constructed
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+cache: true
+strict: false
+compose:
+ ansible_host: iocage_ip4
+ release: iocage_release | split('-') | first
+groups:
+ test: inventory_hostname.startswith('test')
+keyed_groups:
+ - prefix: distro
+ key: iocage_release
+ - prefix: state
+ key: iocage_state
+
+---
+# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+compose:
+ ansible_host: iocage_hooks.0
+groups:
+ test: inventory_hostname.startswith('test')
+"""
+
+import re
+import os
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _parse_ip4(ip4):
+ ''' Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}.
+ If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask.
+ Otherwise, append item to msg.
+ '''
+
+ iocage_ip4_dict = {}
+ iocage_ip4_dict['ip4'] = []
+ iocage_ip4_dict['msg'] = ''
+
+ items = ip4.split(',')
+ for item in items:
+ if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item):
+ i = re.split('\\||/', item)
+ if len(i) == 3:
+ iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]})
+ else:
+ iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'})
+ else:
+ iocage_ip4_dict['msg'] += item
+
+ return iocage_ip4_dict
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using iocage as source. '''
+
+ NAME = 'community.general.iocage'
+ IOCAGE = '/usr/local/bin/iocage'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('iocage.yaml', 'iocage.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"')
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+ cache_key = self.get_cache_key(path)
+
+ user_cache_setting = self.get_option('cache')
+ attempt_to_read_cache = user_cache_setting and cache
+ cache_needs_update = user_cache_setting and not cache
+
+ if attempt_to_read_cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ cache_needs_update = True
+ if not attempt_to_read_cache or cache_needs_update:
+ results = self.get_inventory(path)
+ if cache_needs_update:
+ self._cache[cache_key] = results
+
+ self.populate(results)
+
+ def get_inventory(self, path):
+ host = self.get_option('host')
+ sudo = self.get_option('sudo')
+ sudo_preserve_env = self.get_option('sudo_preserve_env')
+ env = self.get_option('env')
+ get_properties = self.get_option('get_properties')
+ hooks_results = self.get_option('hooks_results')
+ inventory_hostname_tag = self.get_option('inventory_hostname_tag')
+ inventory_hostname_required = self.get_option('inventory_hostname_required')
+
+ cmd = []
+ my_env = os.environ.copy()
+ if host == 'localhost':
+ my_env.update({str(k): str(v) for k, v in env.items()})
+ else:
+ user = self.get_option('user')
+ cmd.append("ssh")
+ cmd.append(f"{user}@{host}")
+ cmd.extend([f"{k}={v}" for k, v in env.items()])
+
+ cmd_list = cmd.copy()
+ if sudo:
+ cmd_list.append('sudo')
+ if sudo_preserve_env:
+ cmd_list.append('--preserve-env')
+ cmd_list.append(self.IOCAGE)
+ cmd_list.append('list')
+ cmd_list.append('--long')
+ try:
+ p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}')
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception as e:
+ raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e
+
+ results = {'_meta': {'hostvars': {}}}
+ self.get_jails(t_stdout, results)
+
+ if get_properties:
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ cmd_get_properties = cmd.copy()
+ cmd_get_properties.append(self.IOCAGE)
+ cmd_get_properties.append("get")
+ cmd_get_properties.append("--all")
+ cmd_get_properties.append(f"{hostname}")
+ try:
+ p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(
+ f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}')
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception as e:
+ raise AnsibleError(f'Failed to get properties: {e}') from e
+
+ self.get_properties(t_stdout, results, hostname)
+
+ if hooks_results:
+ cmd_get_pool = cmd.copy()
+ cmd_get_pool.append(self.IOCAGE)
+ cmd_get_pool.append('get')
+ cmd_get_pool.append('--pool')
+ try:
+ p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(
+ f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}')
+ try:
+ iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip()
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+ except Exception as e:
+ raise AnsibleError(f'Failed to get pool: {e}') from e
+
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ iocage_hooks = []
+ for hook in hooks_results:
+ path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}"
+ cmd_cat_hook = cmd.copy()
+ cmd_cat_hook.append('cat')
+ cmd_cat_hook.append(path)
+ try:
+ p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ iocage_hooks.append('-')
+ continue
+
+ try:
+ iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip()
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception:
+ iocage_hooks.append('-')
+ else:
+ iocage_hooks.append(iocage_hook)
+
+ results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks
+
+ # Optionally, get the jails names from the properties notes.
+ # Requires the notes format "t1=v1 t2=v2 ..."
+ if inventory_hostname_tag:
+ if not get_properties:
+ raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. Enable get_properties')
+ update = {}
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag)
+ if inventory_hostname_tag in tags:
+ update[hostname] = tags[inventory_hostname_tag]
+ elif inventory_hostname_required:
+ raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.')
+ for hostname, alias in update.items():
+ results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname)
+
+ return results
+
+ def get_jails(self, t_stdout, results):
+ lines = t_stdout.splitlines()
+ if len(lines) < 5:
+ return
+ indices = [i for i, val in enumerate(lines[1]) if val == '|']
+ for line in lines[3::2]:
+ jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])]
+ iocage_name = jail[1]
+ iocage_ip4_dict = _parse_ip4(jail[6])
+ if iocage_ip4_dict['ip4']:
+ iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']])
+ else:
+ iocage_ip4 = '-'
+ results['_meta']['hostvars'][iocage_name] = {}
+ results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0]
+ results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2]
+ results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3]
+ results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4]
+ results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5]
+ results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict
+ results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4
+ results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7]
+ results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8]
+ results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9]
+
+ def get_properties(self, t_stdout, results, hostname):
+ properties = dict([x.split(':', 1) for x in t_stdout.splitlines()])
+ results['_meta']['hostvars'][hostname]['iocage_properties'] = properties
+
+ def populate(self, results):
+ strict = self.get_option('strict')
+
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ self.inventory.add_host(hostname, group='all')
+ for var, value in host_vars.items():
+ self.inventory.set_variable(hostname, var, value)
+ self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True)
+ self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict)
diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py
index 5c9a4718f5..bf6faba07a 100644
--- a/plugins/inventory/linode.py
+++ b/plugins/inventory/linode.py
@@ -3,91 +3,92 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: linode
- author:
- - Luke Murphy (@decentral1se)
- short_description: Ansible dynamic inventory plugin for Linode.
- requirements:
- - linode_api4 >= 2.0.0
- description:
- - Reads inventories from the Linode API v4.
- - Uses a YAML configuration file that ends with linode.(yml|yaml).
- - Linode labels are used by default as the hostnames.
- - The default inventory groups are built from groups (deprecated by
- Linode) and not tags.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- cache:
- version_added: 4.5.0
- cache_plugin:
- version_added: 4.5.0
- cache_timeout:
- version_added: 4.5.0
- cache_connection:
- version_added: 4.5.0
- cache_prefix:
- version_added: 4.5.0
- plugin:
- description: Marks this as an instance of the 'linode' plugin.
- type: string
- required: true
- choices: ['linode', 'community.general.linode']
- ip_style:
- description: Populate hostvars with all information available from the Linode APIv4.
- type: string
- default: plain
- choices:
- - plain
- - api
- version_added: 3.6.0
- access_token:
- description: The Linode account personal access token.
- type: string
- required: true
- env:
- - name: LINODE_ACCESS_TOKEN
- regions:
- description: Populate inventory with instances in this region.
- default: []
- type: list
- elements: string
- tags:
- description: Populate inventory only with instances which have at least one of the tags listed here.
- default: []
- type: list
- elements: string
- version_added: 2.0.0
- types:
- description: Populate inventory with instances with this type.
- default: []
- type: list
- elements: string
- strict:
- version_added: 2.0.0
- compose:
- version_added: 2.0.0
- groups:
- version_added: 2.0.0
- keyed_groups:
- version_added: 2.0.0
-'''
+DOCUMENTATION = r"""
+name: linode
+author:
+ - Luke Murphy (@decentral1se)
+short_description: Ansible dynamic inventory plugin for Linode
+requirements:
+ - linode_api4 >= 2.0.0
+description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+ - The default inventory groups are built from groups (deprecated by Linode) and not tags.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ cache:
+ version_added: 4.5.0
+ cache_plugin:
+ version_added: 4.5.0
+ cache_timeout:
+ version_added: 4.5.0
+ cache_connection:
+ version_added: 4.5.0
+ cache_prefix:
+ version_added: 4.5.0
+ plugin:
+ description: Marks this as an instance of the 'linode' plugin.
+ type: string
+ required: true
+ choices: ['linode', 'community.general.linode']
+ ip_style:
+ description: Populate hostvars with all information available from the Linode APIv4.
+ type: string
+ default: plain
+ choices:
+ - plain
+ - api
+ version_added: 3.6.0
+ access_token:
+ description: The Linode account personal access token.
+ type: string
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+ description: Populate inventory with instances in this region.
+ default: []
+ type: list
+ elements: string
+ tags:
+ description: Populate inventory only with instances which have at least one of the tags listed here.
+ default: []
+ type: list
+ elements: string
+ version_added: 2.0.0
+ types:
+ description: Populate inventory with instances with this type.
+ default: []
+ type: list
+ elements: string
+ strict:
+ version_added: 2.0.0
+ compose:
+ version_added: 2.0.0
+ groups:
+ version_added: 2.0.0
+ keyed_groups:
+ version_added: 2.0.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
+---
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: community.general.linode
+---
# You can use Jinja to template the access token.
plugin: community.general.linode
access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
# For older Ansible versions, you need to write this as:
# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
+---
# Example with regions, types, groups and access token
plugin: community.general.linode
access_token: foobar
@@ -96,6 +97,7 @@ regions:
types:
- g5-standard-2
+---
# Example with keyed_groups, groups, and compose
plugin: community.general.linode
access_token: foobar
@@ -114,13 +116,14 @@ compose:
ansible_ssh_host: ipv4[0]
ansible_port: 2222
+---
# Example where control traffic limited to internal network
plugin: community.general.linode
access_token: foobar
ip_style: api
compose:
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
-'''
+"""
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
@@ -146,7 +149,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
access_token = self.get_option('access_token')
if self.templar.is_template(access_token):
- access_token = self.templar.template(variable=access_token, disable_lookups=False)
+ access_token = self.templar.template(variable=access_token)
if access_token is None:
raise AnsibleError((
@@ -161,7 +164,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
self.instances = self.client.linode.instances()
except LinodeApiError as exception:
- raise AnsibleError('Linode client raised: %s' % exception)
+ raise AnsibleError(f'Linode client raised: {exception}')
def _add_groups(self):
"""Add Linode instance groups to the dynamic inventory."""
diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py
index 9ae004f6c5..efdca6563e 100644
--- a/plugins/inventory/lxd.py
+++ b/plugins/inventory/lxd.py
@@ -3,120 +3,122 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: lxd
- short_description: Returns Ansible inventory from lxd host
+DOCUMENTATION = r"""
+name: lxd
+short_description: Returns Ansible inventory from lxd host
+description:
+ - Get inventory from the lxd.
+ - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
+version_added: "3.0.0"
+author: "Frank Dornheim (@conloos)"
+requirements:
+ - ipaddress
+ - lxd >= 4.0
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'lxd' plugin.
+ type: string
+ required: true
+ choices: ['community.general.lxd']
+ url:
description:
- - Get inventory from the lxd.
- - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
- version_added: "3.0.0"
- author: "Frank Dornheim (@conloos)"
- requirements:
- - ipaddress
- - lxd >= 4.0
- options:
- plugin:
- description: Token that ensures this is a source file for the 'lxd' plugin.
- type: string
- required: true
- choices: [ 'community.general.lxd' ]
- url:
- description:
- - The unix domain socket path or the https URL for the lxd server.
- - Sockets in filesystem have to start with C(unix:).
- - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
- type: string
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- client_key:
- description:
- - The client certificate key file path.
- aliases: [ key_file ]
- default: $HOME/.config/lxc/client.key
- type: path
- client_cert:
- description:
- - The client certificate file path.
- aliases: [ cert_file ]
- default: $HOME/.config/lxc/client.crt
- type: path
- server_cert:
- description:
- - The server certificate file path.
- type: path
- version_added: 8.0.0
- server_check_hostname:
- description:
- - This option controls if the server's hostname is checked as part of the HTTPS connection verification.
- This can be useful to disable, if for example, the server certificate provided (see O(server_cert) option)
- does not cover a name matching the one used to communicate with the server. Such mismatch is common as LXD
- generates self-signed server certificates by default.
- type: bool
- default: true
- version_added: 8.0.0
- trust_password:
- description:
- - The client trusted password.
- - You need to set this password on the lxd server before
- running this module using the following command
- C(lxc config set core.trust_password )
- See U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
- - If O(trust_password) is set, this module send a request for authentication before sending any requests.
- type: str
- state:
- description: Filter the instance according to the current status.
- type: str
- default: none
- choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
- project:
- description: Filter the instance according to the given project.
- type: str
- default: default
- version_added: 6.2.0
- type_filter:
- description:
- - Filter the instances by type V(virtual-machine), V(container) or V(both).
- - The first version of the inventory only supported containers.
- type: str
- default: container
- choices: [ 'virtual-machine', 'container', 'both' ]
- version_added: 4.2.0
- prefered_instance_network_interface:
- description:
- - If an instance has multiple network interfaces, select which one is the preferred as pattern.
- - Combined with the first number that can be found e.g. 'eth' + 0.
- - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
- in community.general 3.8.0. The old name still works as an alias.
- type: str
- default: eth
- aliases:
- - prefered_container_network_interface
- prefered_instance_network_family:
- description:
- - If an instance has multiple network interfaces, which one is the preferred by family.
- - Specify V(inet) for IPv4 and V(inet6) for IPv6.
- type: str
- default: inet
- choices: [ 'inet', 'inet6' ]
- groupby:
- description:
- - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
- - See example for syntax.
- type: dict
-'''
+ - The unix domain socket path or the https URL for the lxd server.
+ - Sockets in filesystem have to start with C(unix:).
+ - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
+ type: string
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ client_key:
+ description:
+ - The client certificate key file path.
+ aliases: [key_file]
+ default: $HOME/.config/lxc/client.key
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ aliases: [cert_file]
+ default: $HOME/.config/lxc/client.crt
+ type: path
+ server_cert:
+ description:
+ - The server certificate file path.
+ type: path
+ version_added: 8.0.0
+ server_check_hostname:
+ description:
+ - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be
+ useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name
+ matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server
+ certificates by default.
+ type: bool
+ default: true
+ version_added: 8.0.0
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the lxd server before running this module using the following command C(lxc config
+ set core.trust_password ) See
+ U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
+ - If O(trust_password) is set, this module send a request for authentication before sending any requests.
+ type: str
+ state:
+ description: Filter the instance according to the current status.
+ type: str
+ default: none
+ choices: ['STOPPED', 'STARTING', 'RUNNING', 'none']
+ project:
+ description: Filter the instance according to the given project.
+ type: str
+ default: default
+ version_added: 6.2.0
+ type_filter:
+ description:
+ - Filter the instances by type V(virtual-machine), V(container) or V(both).
+ - The first version of the inventory only supported containers.
+ type: str
+ default: container
+ choices: ['virtual-machine', 'container', 'both']
+ version_added: 4.2.0
+ prefered_instance_network_interface:
+ description:
+ - If an instance has multiple network interfaces, select which one is the preferred as pattern.
+ - Combined with the first number that can be found, for example C(eth) + C(0).
+ - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
+ in community.general 3.8.0. The old name still works as an alias.
+ type: str
+ default: eth
+ aliases:
+ - prefered_container_network_interface
+ prefered_instance_network_family:
+ description:
+ - If an instance has multiple network interfaces, which one is the preferred by family.
+ - Specify V(inet) for IPv4 and V(inet6) for IPv6.
+ type: str
+ default: inet
+ choices: ['inet', 'inet6']
+ groupby:
+ description:
+ - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release),
+ C(type), C(vlanid).
+ - See example for syntax.
+ type: dict
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# simple lxd.yml
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
+---
# simple lxd.yml including filter
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
state: RUNNING
+---
# simple lxd.yml including virtual machines and containers
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
@@ -163,7 +165,7 @@ groupby:
projectInternals:
type: project
attribute: internals
-'''
+"""
import json
import re
@@ -211,7 +213,7 @@ class InventoryModule(BaseInventoryPlugin):
with open(path, 'r') as json_file:
return json.load(json_file)
except (IOError, json.decoder.JSONDecodeError) as err:
- raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
+ raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}')
def save_json_data(self, path, file_name=None):
"""save data as json
@@ -241,7 +243,7 @@ class InventoryModule(BaseInventoryPlugin):
with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
json.dump(self.data, json_file)
except IOError as err:
- raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
+ raise AnsibleParserError(f'Could not save data: {err}')
def verify_file(self, path):
"""Check the config
@@ -281,7 +283,7 @@ class InventoryModule(BaseInventoryPlugin):
if not isinstance(url, str):
return False
if not url.startswith(('unix:', 'https:')):
- raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
+ raise AnsibleError(f'URL is malformed: {url}')
return True
def _connect_to_socket(self):
@@ -306,7 +308,7 @@ class InventoryModule(BaseInventoryPlugin):
return socket_connection
except LXDClientException as err:
error_storage[url] = err
- raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
+ raise AnsibleError(f'No connection to the socket: {error_storage}')
def _get_networks(self):
"""Get Networknames
@@ -355,7 +357,7 @@ class InventoryModule(BaseInventoryPlugin):
# }
url = '/1.0/instances'
if self.project:
- url = url + '?{0}'.format(urlencode(dict(project=self.project)))
+ url = f"{url}?{urlencode(dict(project=self.project))}"
instances = self.socket.do('GET', url)
@@ -383,10 +385,10 @@ class InventoryModule(BaseInventoryPlugin):
config = {}
if isinstance(branch, (tuple, list)):
config[name] = {branch[1]: self.socket.do(
- 'GET', '/1.0/{0}/{1}/{2}?{3}'.format(to_native(branch[0]), to_native(name), to_native(branch[1]), urlencode(dict(project=self.project))))}
+ 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')}
else:
config[name] = {branch: self.socket.do(
- 'GET', '/1.0/{0}/{1}?{2}'.format(to_native(branch), to_native(name), urlencode(dict(project=self.project))))}
+ 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')}
return config
def get_instance_data(self, names):
@@ -449,7 +451,7 @@ class InventoryModule(BaseInventoryPlugin):
None
Returns:
dict(network_configuration): network config"""
- instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
+ instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network')
network_configuration = None
if instance_network_interfaces:
network_configuration = {}
@@ -462,7 +464,7 @@ class InventoryModule(BaseInventoryPlugin):
address_set['family'] = address.get('family')
address_set['address'] = address.get('address')
address_set['netmask'] = address.get('netmask')
- address_set['combined'] = address.get('address') + '/' + address.get('netmask')
+ address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}"
network_configuration[interface_name].append(address_set)
return network_configuration
@@ -479,7 +481,7 @@ class InventoryModule(BaseInventoryPlugin):
None
Returns:
str(prefered_interface): None or interface name"""
- instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces')
prefered_interface = None # init
if instance_network_interfaces: # instance have network interfaces
# generator if interfaces which start with the desired pattern
@@ -516,7 +518,7 @@ class InventoryModule(BaseInventoryPlugin):
# "network":"lxdbr0",
# "type":"nic"},
vlan_ids = {}
- devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
+ devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices')
for device in devices:
if 'network' in devices[device]:
if devices[device]['network'] in network_vlans:
@@ -579,7 +581,7 @@ class InventoryModule(BaseInventoryPlugin):
else:
path[instance_name][key] = value
except KeyError as err:
- raise AnsibleParserError("Unable to store Information: {0}".format(to_native(err)))
+ raise AnsibleParserError(f"Unable to store Information: {err}")
def extract_information_from_instance_configs(self):
"""Process configuration information
@@ -600,24 +602,24 @@ class InventoryModule(BaseInventoryPlugin):
for instance_name in self.data['instances']:
self._set_data_entry(instance_name, 'os', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.os'))
self._set_data_entry(instance_name, 'release', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.release'))
self._set_data_entry(instance_name, 'version', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.version'))
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
- 'instances/{0}/instances/metadata/profiles'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/profiles'))
self._set_data_entry(instance_name, 'location', self._get_data_entry(
- 'instances/{0}/instances/metadata/location'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/location'))
self._set_data_entry(instance_name, 'state', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power'))
self._set_data_entry(instance_name, 'type', self._get_data_entry(
- 'instances/{0}/instances/metadata/type'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/type'))
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
self._set_data_entry(instance_name, 'project', self._get_data_entry(
- 'instances/{0}/instances/metadata/project'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/project'))
def build_inventory_network(self, instance_name):
"""Add the network interfaces of the instance to the inventory
@@ -651,18 +653,18 @@ class InventoryModule(BaseInventoryPlugin):
None
Returns:
dict(interface_name: ip)"""
- prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
+ prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None
prefered_instance_network_family = self.prefered_instance_network_family
ip_address = ''
if prefered_interface:
- interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
+ interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}')
for config in interface:
if config['family'] == prefered_instance_network_family:
ip_address = config['address']
break
else:
- interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces')
for interface in interfaces.values():
for config in interface:
if config['family'] == prefered_instance_network_family:
@@ -670,7 +672,7 @@ class InventoryModule(BaseInventoryPlugin):
break
return ip_address
- if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
+ if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance have network interfaces
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name)))
else:
@@ -691,7 +693,7 @@ class InventoryModule(BaseInventoryPlugin):
Returns:
None"""
for instance_name in self.data['inventory']:
- instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
+ instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower()
# Only consider instances that match the "state" filter, if self.state is not None
if self.filter:
@@ -703,34 +705,34 @@ class InventoryModule(BaseInventoryPlugin):
# add network information
self.build_inventory_network(instance_name)
# add os
- v = self._get_data_entry('inventory/{0}/os'.format(instance_name))
+ v = self._get_data_entry(f'inventory/{instance_name}/os')
if v:
self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower()))
# add release
- v = self._get_data_entry('inventory/{0}/release'.format(instance_name))
+ v = self._get_data_entry(f'inventory/{instance_name}/release')
if v:
self.inventory.set_variable(
instance_name, 'ansible_lxd_release', make_unsafe(v.lower()))
# add profile
self.inventory.set_variable(
- instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry('inventory/{0}/profile'.format(instance_name))))
+ instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile')))
# add state
self.inventory.set_variable(
instance_name, 'ansible_lxd_state', make_unsafe(instance_state))
# add type
self.inventory.set_variable(
- instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry('inventory/{0}/type'.format(instance_name))))
+ instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type')))
# add location information
- if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
+ if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None'
self.inventory.set_variable(
- instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry('inventory/{0}/location'.format(instance_name))))
+ instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location')))
# add VLAN_ID information
- if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
+ if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'):
self.inventory.set_variable(
- instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))))
+ instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids')))
# add project
self.inventory.set_variable(
- instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry('inventory/{0}/project'.format(instance_name))))
+ instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project')))
def build_inventory_groups_location(self, group_name):
"""create group by attribute: location
@@ -792,7 +794,7 @@ class InventoryModule(BaseInventoryPlugin):
network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
except ValueError as err:
raise AnsibleParserError(
- 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
+ f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}")
for instance_name in self.inventory.hosts:
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
@@ -997,12 +999,12 @@ class InventoryModule(BaseInventoryPlugin):
elif self.groupby[group_name].get('type') == 'project':
self.build_inventory_groups_project(group_name)
else:
- raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
+ raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}')
if self.groupby:
for group_name in self.groupby:
if not group_name.isalnum():
- raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
+ raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}')
group_type(make_unsafe(group_name))
def build_inventory(self):
@@ -1039,7 +1041,7 @@ class InventoryModule(BaseInventoryPlugin):
None"""
iter_keys = list(self.data['instances'].keys())
for instance_name in iter_keys:
- if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
+ if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter:
del self.data['instances'][instance_name]
def _populate(self):
@@ -1120,6 +1122,6 @@ class InventoryModule(BaseInventoryPlugin):
self.url = self.get_option('url')
except Exception as err:
raise AnsibleParserError(
- 'All correct options required: {0}'.format(to_native(err)))
+ f'All correct options required: {err}')
# Call our internal helper to populate the dynamic inventory
self._populate()
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
index 48f02c446b..3339d66b46 100644
--- a/plugins/inventory/nmap.py
+++ b/plugins/inventory/nmap.py
@@ -3,112 +3,118 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: nmap
- short_description: Uses nmap to find hosts to target
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: nmap
+short_description: Uses nmap to find hosts to target
+description:
+ - Uses a YAML configuration file with a valid YAML extension.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+requirements:
+ - nmap CLI installed
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin.
+ type: string
+ required: true
+ choices: ['nmap', 'community.general.nmap']
+ sudo:
+ description: Set to V(true) to execute a C(sudo nmap) plugin scan.
+ version_added: 4.8.0
+ default: false
+ type: boolean
+ address:
+ description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
+ type: string
+ required: true
+ env:
+ - name: ANSIBLE_NMAP_ADDRESS
+ version_added: 6.6.0
+ exclude:
description:
- - Uses a YAML configuration file with a valid YAML extension.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- requirements:
- - nmap CLI installed
- options:
- plugin:
- description: token that ensures this is a source file for the 'nmap' plugin.
- type: string
- required: true
- choices: ['nmap', 'community.general.nmap']
- sudo:
- description: Set to V(true) to execute a C(sudo nmap) plugin scan.
- version_added: 4.8.0
- default: false
- type: boolean
- address:
- description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
- type: string
- required: true
- env:
- - name: ANSIBLE_NMAP_ADDRESS
- version_added: 6.6.0
- exclude:
- description:
- - List of addresses to exclude.
- - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
- type: list
- elements: string
- env:
- - name: ANSIBLE_NMAP_EXCLUDE
- version_added: 6.6.0
- port:
- description:
- - Only scan specific port or port range (C(-p)).
- - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports,
- or V(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
- type: string
- version_added: 6.5.0
- ports:
- description: Enable/disable scanning ports.
- type: boolean
- default: true
- ipv4:
- description: use IPv4 type addresses
- type: boolean
- default: true
- ipv6:
- description: use IPv6 type addresses
- type: boolean
- default: true
- udp_scan:
- description:
- - Scan via UDP.
- - Depending on your system you might need O(sudo=true) for this to work.
- type: boolean
- default: false
- version_added: 6.1.0
- icmp_timestamp:
- description:
- - Scan via ICMP Timestamp (C(-PP)).
- - Depending on your system you might need O(sudo=true) for this to work.
- type: boolean
- default: false
- version_added: 6.1.0
- open:
- description: Only scan for open (or possibly open) ports.
- type: boolean
- default: false
- version_added: 6.5.0
- dns_resolve:
- description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
- type: boolean
- default: false
- version_added: 6.1.0
- use_arp_ping:
- description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
- type: boolean
- default: true
- version_added: 7.4.0
- notes:
- - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false).
- - 'TODO: add OS fingerprinting'
-'''
-EXAMPLES = '''
+ - List of addresses to exclude.
+ - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_NMAP_EXCLUDE
+ version_added: 6.6.0
+ port:
+ description:
+ - Only scan specific port or port range (C(-p)).
+ - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9)
+ to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
+ type: string
+ version_added: 6.5.0
+ ports:
+ description: Enable/disable scanning ports.
+ type: boolean
+ default: true
+ ipv4:
+ description: Use IPv4 type addresses.
+ type: boolean
+ default: true
+ ipv6:
+ description: Use IPv6 type addresses.
+ type: boolean
+ default: true
+ udp_scan:
+ description:
+ - Scan using UDP.
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ icmp_timestamp:
+ description:
+ - Scan using ICMP Timestamp (C(-PP)).
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ open:
+ description: Only scan for open (or possibly open) ports.
+ type: boolean
+ default: false
+ version_added: 6.5.0
+ dns_resolve:
+ description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ dns_servers:
+ description: Specify which DNS servers to use for name resolution.
+ type: list
+ elements: string
+ version_added: 10.5.0
+ use_arp_ping:
+ description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
+ type: boolean
+ default: true
+ version_added: 7.4.0
+notes:
+ - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false).
+ - 'TODO: add OS fingerprinting.'
+"""
+EXAMPLES = r"""
+---
# inventory.config file in YAML format
plugin: community.general.nmap
strict: false
address: 192.168.0.0/24
-
+---
# a sudo nmap scan to fully use nmap scan power.
plugin: community.general.nmap
sudo: true
strict: false
address: 192.168.0.0/24
+---
# an nmap scan specifying ports and classifying results to an inventory group
plugin: community.general.nmap
address: 192.168.0.0/24
@@ -116,7 +122,7 @@ exclude: 192.168.0.1, web.example.com
port: 22, 443
groups:
web_servers: "ports | selectattr('port', 'equalto', '443')"
-'''
+"""
import os
import re
@@ -178,7 +184,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
self._nmap = get_bin_path('nmap')
except ValueError as e:
- raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
+ raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}')
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
@@ -230,6 +236,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if self.get_option('dns_resolve'):
cmd.append('-n')
+ if self.get_option('dns_servers'):
+ cmd.append('--dns-servers')
+ cmd.append(','.join(self.get_option('dns_servers')))
+
if self.get_option('udp_scan'):
cmd.append('-sU')
@@ -248,7 +258,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
+ raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}')
# parse results
host = None
@@ -259,7 +269,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
t_stdout = to_text(stdout, errors='surrogate_or_strict')
except UnicodeError as e:
- raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
+ raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}')
for line in t_stdout.splitlines():
hits = self.find_host.match(line)
@@ -300,7 +310,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
results[-1]['ports'] = ports
except Exception as e:
- raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
+ raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ")
if cache_needs_update:
self._cache[cache_key] = results
diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py
index 70b8d14192..8b4821a009 100644
--- a/plugins/inventory/online.py
+++ b/plugins/inventory/online.py
@@ -3,52 +3,51 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: online
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway (previously Online SAS or Online.net) inventory source
- description:
- - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
- options:
- plugin:
- description: token that ensures this is a source file for the 'online' plugin.
- type: string
- required: true
- choices: ['online', 'community.general.online']
- oauth_token:
- required: true
- description: Online OAuth token.
- type: string
- env:
- # in order of precedence
- - name: ONLINE_TOKEN
- - name: ONLINE_API_KEY
- - name: ONLINE_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - hostname
- groups:
- description: List of groups.
- type: list
- elements: string
- choices:
- - location
- - offer
- - rpn
-'''
+DOCUMENTATION = r"""
+name: online
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway (previously Online SAS or Online.net) inventory source
+description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin.
+ type: string
+ required: true
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: true
+ description: Online OAuth token.
+ type: string
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+ description: List of preference about what to use as an hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ elements: string
+ choices:
+ - location
+ - offer
+ - rpn
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# online_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i online_inventory.yml
@@ -59,7 +58,7 @@ groups:
- location
- offer
- rpn
-'''
+"""
import json
from sys import version as python_version
@@ -138,7 +137,7 @@ class InventoryModule(BaseInventoryPlugin):
try:
response = open_url(url, headers=self.headers)
except Exception as e:
- self.display.warning("An error happened while fetching: %s" % url)
+ self.display.warning(f"An error happened while fetching: {url}")
return None
try:
@@ -245,8 +244,8 @@ class InventoryModule(BaseInventoryPlugin):
}
self.headers = {
- 'Authorization': "Bearer %s" % token,
- 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
+ 'Authorization': f"Bearer {token}",
+ 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}",
'Content-type': 'application/json'
}
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py
index 077d3da5a3..8ced301dd1 100644
--- a/plugins/inventory/opennebula.py
+++ b/plugins/inventory/opennebula.py
@@ -3,81 +3,78 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = r'''
- name: opennebula
- author:
- - Kristian Feldsam (@feldsam)
- short_description: OpenNebula inventory source
- version_added: "3.8.0"
- extends_documentation_fragment:
- - constructed
+DOCUMENTATION = r"""
+name: opennebula
+author:
+ - Kristian Feldsam (@feldsam)
+short_description: OpenNebula inventory source
+version_added: "3.8.0"
+extends_documentation_fragment:
+ - constructed
+description:
+ - Get inventory hosts from OpenNebula cloud.
+ - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values.
+ - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file.
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'opennebula' plugin.
+ type: string
+ required: true
+ choices: [community.general.opennebula]
+ api_url:
description:
- - Get inventory hosts from OpenNebula cloud.
- - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml)
- to set parameter values.
- - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file.
- options:
- plugin:
- description: Token that ensures this is a source file for the 'opennebula' plugin.
- type: string
- required: true
- choices: [ community.general.opennebula ]
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the E(ONE_URL) environment variable is used.
- env:
- - name: ONE_URL
- required: true
- type: string
- api_username:
- description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the E(ONE_USERNAME) environment variable is used.
- env:
- - name: ONE_USERNAME
- type: string
- api_password:
- description:
- - Password or a token of the user to login into OpenNebula RPC server.
- - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
- env:
- - name: ONE_PASSWORD
- required: false
- type: string
- api_authfile:
- description:
- - If both O(api_username) or O(api_password) are not set, then it will try
- authenticate with ONE auth file. Default path is C(~/.one/one_auth).
- - Set environment variable E(ONE_AUTH) to override this path.
- env:
- - name: ONE_AUTH
- required: false
- type: string
- hostname:
- description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
- type: string
- default: v4_first_ip
- choices:
- - v4_first_ip
- - v6_first_ip
- - name
- filter_by_label:
- description: Only return servers filtered by this label.
- type: string
- group_by_labels:
- description: Create host groups by vm labels
- type: bool
- default: true
-'''
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
+ env:
+ - name: ONE_URL
+ required: true
+ type: string
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+ variable is used.
+ env:
+ - name: ONE_USERNAME
+ type: string
+ api_password:
+ description:
+ - Password or a token of the user to login into OpenNebula RPC server.
+ - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
+ env:
+ - name: ONE_PASSWORD
+ required: false
+ type: string
+ api_authfile:
+ description:
+ - If both O(api_username) or O(api_password) are not set, then it tries to authenticate with ONE auth file. Default
+ path is C(~/.one/one_auth).
+ - Set environment variable E(ONE_AUTH) to override this path.
+ env:
+ - name: ONE_AUTH
+ required: false
+ type: string
+ hostname:
+ description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
+ type: string
+ default: v4_first_ip
+ choices:
+ - v4_first_ip
+ - v6_first_ip
+ - name
+ filter_by_label:
+ description: Only return servers filtered by this label.
+ type: string
+ group_by_labels:
+ description: Create host groups by VM labels.
+ type: bool
+ default: true
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# inventory_opennebula.yml file in YAML format
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
@@ -85,7 +82,7 @@ EXAMPLES = r'''
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
filter_by_label: Cache
-'''
+"""
try:
import pyone
@@ -96,7 +93,6 @@ except ImportError:
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
@@ -128,9 +124,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
authstring = fp.read().rstrip()
username, password = authstring.split(":")
except (OSError, IOError):
- raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
+ raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'")
except Exception:
- raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
+ raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'")
auth_params = namedtuple('auth', ('url', 'username', 'password'))
@@ -166,13 +162,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
if not (auth.username and auth.password):
raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
else:
- one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+ one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}")
# get hosts (VMs)
try:
vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
except Exception as e:
- raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
+ raise AnsibleError(f"Something happened during XML-RPC call: {e}")
return vm_pool
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
deleted file mode 100644
index 975c344aef..0000000000
--- a/plugins/inventory/proxmox.py
+++ /dev/null
@@ -1,710 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: proxmox
- short_description: Proxmox inventory source
- version_added: "1.2.0"
- author:
- - Jeffrey van Pelt (@Thulium-Drake)
- requirements:
- - requests >= 1.1
- description:
- - Get inventory hosts from a Proxmox PVE cluster.
- - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
- - Will retrieve the first network interface with an IP for Proxmox nodes.
- - Can retrieve LXC/QEMU configuration as facts.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.proxmox) for this plugin to recognize it as it's own.
- required: true
- choices: ['community.general.proxmox']
- type: str
- url:
- description:
- - URL to Proxmox cluster.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_URL) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(url).
- default: 'http://localhost:8006'
- type: str
- env:
- - name: PROXMOX_URL
- version_added: 2.0.0
- user:
- description:
- - Proxmox authentication user.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_USER) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(user).
- required: true
- type: str
- env:
- - name: PROXMOX_USER
- version_added: 2.0.0
- password:
- description:
- - Proxmox authentication password.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_PASSWORD) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(password).
- - If you do not specify a password, you must set O(token_id) and O(token_secret) instead.
- type: str
- env:
- - name: PROXMOX_PASSWORD
- version_added: 2.0.0
- token_id:
- description:
- - Proxmox authentication token ID.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_ID) will be used instead.
- - To use token authentication, you must also specify O(token_secret). If you do not specify O(token_id) and O(token_secret),
- you must set a password instead.
- - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_ID
- token_secret:
- description:
- - Proxmox authentication token secret.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_SECRET) will be used instead.
- - To use token authentication, you must also specify O(token_id). If you do not specify O(token_id) and O(token_secret),
- you must set a password instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_SECRET
- validate_certs:
- description: Verify SSL certificate if using HTTPS.
- type: boolean
- default: true
- group_prefix:
- description: Prefix to apply to Proxmox groups.
- default: proxmox_
- type: str
- facts_prefix:
- description: Prefix to apply to LXC/QEMU config facts.
- default: proxmox_
- type: str
- want_facts:
- description:
- - Gather LXC/QEMU configuration facts.
- - When O(want_facts) is set to V(true) more details about QEMU VM status are possible, besides the running and stopped states.
- Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group,
- but its actual state will be paused. See O(qemu_extended_statuses) for how to retrieve the real status.
- default: false
- type: bool
- qemu_extended_statuses:
- description:
- - Requires O(want_facts) to be set to V(true) to function. This will allow you to differentiate between C(paused) and C(prelaunch)
- statuses of the QEMU VMs.
- - This introduces multiple groups [prefixed with O(group_prefix)] C(prelaunch) and C(paused).
- default: false
- type: bool
- version_added: 5.1.0
- want_proxmox_nodes_ansible_host:
- version_added: 3.0.0
- description:
- - Whether to set C(ansible_host) for proxmox nodes.
- - When set to V(true) (default), will use the first available interface. This can be different from what you expect.
- - The default of this option changed from V(true) to V(false) in community.general 6.0.0.
- type: bool
- default: false
- exclude_nodes:
- description: Exclude proxmox nodes and the nodes-group from the inventory output.
- type: bool
- default: false
- version_added: 8.1.0
- filters:
- version_added: 4.6.0
- description: A list of Jinja templates that allow filtering hosts.
- type: list
- elements: str
- default: []
- strict:
- version_added: 2.5.0
- compose:
- version_added: 2.5.0
- groups:
- version_added: 2.5.0
- keyed_groups:
- version_added: 2.5.0
-'''
-
-EXAMPLES = '''
-# Minimal example which will not gather additional facts for QEMU/LXC guests
-# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
-# my.proxmox.yml
-plugin: community.general.proxmox
-user: ansible@pve
-password: secure
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-# Instead of login with password, proxmox supports api token authentication since release 6.2.
-plugin: community.general.proxmox
-user: ci@pve
-token_id: gitlab-1
-token_secret: fa256e9c-26ab-41ec-82da-707a2c079829
-
-# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET.
-token_secret: !vault |
- $ANSIBLE_VAULT;1.1;AES256
- 62353634333163633336343265623632626339313032653563653165313262343931643431656138
- 6134333736323265656466646539663134306166666237630a653363623262636663333762316136
- 34616361326263383766366663393837626437316462313332663736623066656237386531663731
- 3037646432383064630a663165303564623338666131353366373630656661333437393937343331
- 32643131386134396336623736393634373936356332623632306561356361323737313663633633
- 6231313333666361656537343562333337323030623732323833
-
-# More complete example demonstrating the use of 'want_facts' and the constructed options
-# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-want_facts: true
-keyed_groups:
- # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
- - key: proxmox_tags_parsed
- separator: ""
- prefix: group
-groups:
- webservers: "'web' in (proxmox_tags_parsed|list)"
- mailservers: "'mail' in (proxmox_tags_parsed|list)"
-compose:
- ansible_port: 2222
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
-# (Default is connection by name of QEMU/LXC guests)
-# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://192.168.1.2:8006
-user: ansible@pve
-password: secure
-validate_certs: false # only do this when you trust the network!
-want_facts: true
-want_proxmox_nodes_ansible_host: false
-compose:
- ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
- my_inv_var_1: "'my_var1_value'"
- my_inv_var_2: >
- "my_var_2_value"
-
-# Specify the url, user and password using templating
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}"
-user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}"
-password: "{{ lookup('community.general.random_string', base64=True) }}"
-# Note that this can easily give you wrong values as ansible_host. See further up for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-'''
-
-import itertools
-import re
-
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.utils.display import Display
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
-
-# 3rd party imports
-try:
- import requests
- if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
- raise ImportError
- HAS_REQUESTS = True
-except ImportError:
- HAS_REQUESTS = False
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
- ''' Host inventory parser for ansible using Proxmox as source. '''
-
- NAME = 'community.general.proxmox'
-
- def __init__(self):
-
- super(InventoryModule, self).__init__()
-
- # from config
- self.proxmox_url = None
-
- self.session = None
- self.cache_key = None
- self.use_cache = None
-
- def verify_file(self, path):
-
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('proxmox.yaml', 'proxmox.yml')):
- valid = True
- else:
- self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
- return valid
-
- def _get_session(self):
- if not self.session:
- self.session = requests.session()
- self.session.verify = self.get_option('validate_certs')
- return self.session
-
- def _get_auth(self):
-
- validate_certs = self.get_option('validate_certs')
-
- if validate_certs is False:
- from requests.packages.urllib3 import disable_warnings
- disable_warnings()
-
- if self.proxmox_password:
-
- credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password})
-
- a = self._get_session()
-
- ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
-
- json = ret.json()
-
- self.headers = {
- # only required for POST/PUT/DELETE methods, which we are not using currently
- # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
- 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket'])
- }
-
- else:
-
- self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)}
-
- def _get_json(self, url, ignore_errors=None):
-
- data = []
- has_data = False
-
- if self.use_cache:
- try:
- data = self._cache[self.cache_key][url]
- has_data = True
- except KeyError:
- self.update_cache = True
-
- if not has_data:
- s = self._get_session()
- while True:
- ret = s.get(url, headers=self.headers)
- if ignore_errors and ret.status_code in ignore_errors:
- break
- ret.raise_for_status()
- json = ret.json()
-
- # process results
- # FIXME: This assumes 'return type' matches a specific query,
- # it will break if we expand the queries and they dont have different types
- if 'data' not in json:
- # /hosts/:id does not have a 'data' key
- data = json
- break
- elif isinstance(json['data'], MutableMapping):
- # /facts are returned as dict in 'data'
- data = json['data']
- break
- else:
- if json['data']:
- # /hosts 's 'results' is a list of all hosts, returned is paginated
- data = data + json['data']
- break
-
- self._results[url] = data
- return make_unsafe(data)
-
- def _get_nodes(self):
- return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
-
- def _get_pools(self):
- return self._get_json("%s/api2/json/pools" % self.proxmox_url)
-
- def _get_lxc_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
-
- def _get_qemu_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
-
- def _get_members_per_pool(self, pool):
- ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
- return ret['members']
-
- def _get_node_ip(self, node):
- ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
-
- # sort interface by iface name to make selection as stable as possible
- ret.sort(key=lambda x: x['iface'])
-
- for iface in ret:
- try:
- # only process interfaces adhering to these rules
- if 'active' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have an active state")
- continue
- if 'address' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have an address")
- continue
- if 'gateway' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have a gateway")
- continue
- self.display.vv(f"Using interface {iface['iface']} on node {node} with address {iface['address']} as node ip for ansible_host")
- return iface['address']
- except Exception:
- continue
- return None
-
- def _get_lxc_interfaces(self, properties, node, vmid):
- status_key = self._fact('status')
-
- if status_key not in properties or not properties[status_key] == 'running':
- return
-
- ret = self._get_json("%s/api2/json/nodes/%s/lxc/%s/interfaces" % (self.proxmox_url, node, vmid), ignore_errors=[501])
- if not ret:
- return
-
- result = []
-
- for iface in ret:
- result_iface = {
- 'name': iface['name'],
- 'hwaddr': iface['hwaddr']
- }
-
- if 'inet' in iface:
- result_iface['inet'] = iface['inet']
-
- if 'inet6' in iface:
- result_iface['inet6'] = iface['inet6']
-
- result.append(result_iface)
-
- properties[self._fact('lxc_interfaces')] = result
-
- def _get_agent_network_interfaces(self, node, vmid, vmtype):
- result = []
-
- try:
- ifaces = self._get_json(
- "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
- self.proxmox_url, node, vmtype, vmid
- )
- )['result']
-
- if "error" in ifaces:
- if "class" in ifaces["error"]:
- # This happens on Windows, even though qemu agent is running, the IP address
- # cannot be fetched, as it's unsupported, also a command disabled can happen.
- errorClass = ifaces["error"]["class"]
- if errorClass in ["Unsupported"]:
- self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
- elif errorClass in ["CommandDisabled"]:
- self.display.v("Retrieving network interfaces from guest agents has been disabled")
- return result
-
- for iface in ifaces:
- result.append({
- 'name': iface['name'],
- 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
- 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
- })
- except requests.HTTPError:
- pass
-
- return result
-
- def _get_vm_config(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
-
- properties[self._fact('node')] = node
- properties[self._fact('vmid')] = vmid
- properties[self._fact('vmtype')] = vmtype
-
- plaintext_configs = [
- 'description',
- ]
-
- for config in ret:
- key = self._fact(config)
- value = ret[config]
- try:
- # fixup disk images as they have no key
- if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
- value = ('disk_image=' + value)
-
- # Additional field containing parsed tags as list
- if config == 'tags':
- stripped_value = value.strip()
- if stripped_value:
- parsed_key = key + "_parsed"
- properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")]
-
- # The first field in the agent string tells you whether the agent is enabled
- # the rest of the comma separated string is extra config for the agent.
- # In some (newer versions of proxmox) instances it can be 'enabled=1'.
- if config == 'agent':
- agent_enabled = 0
- try:
- agent_enabled = int(value.split(',')[0])
- except ValueError:
- if value.split(',')[0] == "enabled=1":
- agent_enabled = 1
- if agent_enabled:
- agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
- if agent_iface_value:
- agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
- properties[agent_iface_key] = agent_iface_value
-
- if config == 'lxc':
- out_val = {}
- for k, v in value:
- if k.startswith('lxc.'):
- k = k[len('lxc.'):]
- out_val[k] = v
- value = out_val
-
- if config not in plaintext_configs and isinstance(value, string_types) \
- and all("=" in v for v in value.split(",")):
- # split off strings with commas to a dict
- # skip over any keys that cannot be processed
- try:
- value = dict(key.split("=", 1) for key in value.split(","))
- except Exception:
- continue
-
- properties[key] = value
- except NameError:
- return None
-
- def _get_vm_status(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
- properties[self._fact('status')] = ret['status']
- if vmtype == 'qemu':
- properties[self._fact('qmpstatus')] = ret['qmpstatus']
-
- def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
- snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
- properties[self._fact('snapshots')] = snapshots
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
- #> ProxmoxInventory.to_safe("foo-bar baz")
- 'foo_barbaz'
- '''
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
-
- def _fact(self, name):
- '''Generate a fact's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
-
- def _group(self, name):
- '''Generate a group's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
-
- def _can_add_host(self, name, properties):
- '''Ensure that a host satisfies all defined hosts filters. If strict mode is
- enabled, any error during host filter compositing will lead to an AnsibleError
- being raised, otherwise the filter will be ignored.
- '''
- for host_filter in self.host_filters:
- try:
- if not self._compose(host_filter, properties):
- return False
- except Exception as e: # pylint: disable=broad-except
- message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
- if self.strict:
- raise AnsibleError(message)
- display.warning(message)
- return True
-
- def _add_host(self, name, variables):
- self.inventory.add_host(name)
- for k, v in variables.items():
- self.inventory.set_variable(name, k, v)
- variables = self.inventory.get_host(name).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
- self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
-
- def _handle_item(self, node, ittype, item):
- '''Handle an item from the list of LXC containers and Qemu VM. The
- return value will be either None if the item was skipped or the name of
- the item if it was added to the inventory.'''
- if item.get('template'):
- return None
-
- properties = dict()
- name, vmid = item['name'], item['vmid']
-
- # get status, config and snapshots if want_facts == True
- want_facts = self.get_option('want_facts')
- if want_facts:
- self._get_vm_status(properties, node, vmid, ittype, name)
- self._get_vm_config(properties, node, vmid, ittype, name)
- self._get_vm_snapshots(properties, node, vmid, ittype, name)
-
- if ittype == 'lxc':
- self._get_lxc_interfaces(properties, node, vmid)
-
- # ensure the host satisfies filters
- if not self._can_add_host(name, properties):
- return None
-
- # add the host to the inventory
- self._add_host(name, properties)
- node_type_group = self._group('%s_%s' % (node, ittype))
- self.inventory.add_child(self._group('all_' + ittype), name)
- self.inventory.add_child(node_type_group, name)
-
- item_status = item['status']
- if item_status == 'running':
- if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'):
- # get more details about the status of the qemu VM
- item_status = properties.get(self._fact('qmpstatus'), item_status)
- self.inventory.add_child(self._group('all_%s' % (item_status, )), name)
-
- return name
-
- def _populate_pool_groups(self, added_hosts):
- '''Generate groups from Proxmox resource pools, ignoring VMs and
- containers that were skipped.'''
- for pool in self._get_pools():
- poolid = pool.get('poolid')
- if not poolid:
- continue
- pool_group = self._group('pool_' + poolid)
- self.inventory.add_group(pool_group)
-
- for member in self._get_members_per_pool(poolid):
- name = member.get('name')
- if name and name in added_hosts:
- self.inventory.add_child(pool_group, name)
-
- def _populate(self):
-
- # create common groups
- default_groups = ['lxc', 'qemu', 'running', 'stopped']
-
- if self.get_option('qemu_extended_statuses'):
- default_groups.extend(['prelaunch', 'paused'])
-
- for group in default_groups:
- self.inventory.add_group(self._group('all_%s' % (group)))
- nodes_group = self._group('nodes')
- if not self.exclude_nodes:
- self.inventory.add_group(nodes_group)
-
- want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
-
- # gather vm's on nodes
- self._get_auth()
- hosts = []
- for node in self._get_nodes():
- if not node.get('node'):
- continue
- if not self.exclude_nodes:
- self.inventory.add_host(node['node'])
- if node['type'] == 'node' and not self.exclude_nodes:
- self.inventory.add_child(nodes_group, node['node'])
-
- if node['status'] == 'offline':
- continue
-
- # get node IP address
- if want_proxmox_nodes_ansible_host and not self.exclude_nodes:
- ip = self._get_node_ip(node['node'])
- self.inventory.set_variable(node['node'], 'ansible_host', ip)
-
- # Setting composite variables
- if not self.exclude_nodes:
- variables = self.inventory.get_host(node['node']).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, node['node'], strict=self.strict)
-
- # add LXC/Qemu groups for the node
- for ittype in ('lxc', 'qemu'):
- node_type_group = self._group('%s_%s' % (node['node'], ittype))
- self.inventory.add_group(node_type_group)
-
- # get LXC containers and Qemu VMs for this node
- lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
- qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
- for ittype, item in itertools.chain(lxc_objects, qemu_objects):
- name = self._handle_item(node['node'], ittype, item)
- if name is not None:
- hosts.append(name)
-
- # gather vm's in pools
- self._populate_pool_groups(hosts)
-
- def parse(self, inventory, loader, path, cache=True):
- if not HAS_REQUESTS:
- raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
- 'https://github.com/psf/requests.')
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- # read config from file, this sets 'options'
- self._read_config_data(path)
-
- # read and template auth options
- for o in ('url', 'user', 'password', 'token_id', 'token_secret'):
- v = self.get_option(o)
- if self.templar.is_template(v):
- v = self.templar.template(v, disable_lookups=False)
- setattr(self, 'proxmox_%s' % o, v)
-
- # some more cleanup and validation
- self.proxmox_url = self.proxmox_url.rstrip('/')
-
- if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None):
- raise AnsibleError('You must specify either a password or both token_id and token_secret.')
-
- if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
- raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
- # read rest of options
- self.exclude_nodes = self.get_option('exclude_nodes')
- self.cache_key = self.get_cache_key(path)
- self.use_cache = cache and self.get_option('cache')
- self.update_cache = not cache and self.get_option('cache')
- self.host_filters = self.get_option('filters')
- self.group_prefix = self.get_option('group_prefix')
- self.facts_prefix = self.get_option('facts_prefix')
- self.strict = self.get_option('strict')
-
- # actually populate inventory
- self._results = {}
- self._populate()
- if self.update_cache:
- self._cache[self.cache_key] = self._results
diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py
index 4205caeca7..c730049833 100644
--- a/plugins/inventory/scaleway.py
+++ b/plugins/inventory/scaleway.py
@@ -3,80 +3,79 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = r'''
- name: scaleway
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway inventory source
+DOCUMENTATION = r"""
+name: scaleway
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway inventory source
+description:
+ - Get inventory hosts from Scaleway.
+requirements:
+ - PyYAML
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'scaleway' plugin.
+ required: true
+ type: string
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region.
+ type: list
+ elements: string
+ default:
+ - ams1
+ - par1
+ - par2
+ - waw1
+ tags:
+ description: Filter results on a specific tag.
+ type: list
+ elements: string
+ scw_profile:
description:
- - Get inventory hosts from Scaleway.
- requirements:
- - PyYAML
- options:
- plugin:
- description: Token that ensures this is a source file for the 'scaleway' plugin.
- required: true
- type: string
- choices: ['scaleway', 'community.general.scaleway']
- regions:
- description: Filter results on a specific Scaleway region.
- type: list
- elements: string
- default:
- - ams1
- - par1
- - par2
- - waw1
- tags:
- description: Filter results on a specific tag.
- type: list
- elements: string
- scw_profile:
- description:
- - The config profile to use in config file.
- - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is not defined.
- type: string
- version_added: 4.4.0
- oauth_token:
- description:
- - Scaleway OAuth token.
- - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
- (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
- - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
- type: string
- env:
- # in order of precedence
- - name: SCW_TOKEN
- - name: SCW_API_KEY
- - name: SCW_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - public_ipv6
- - hostname
- - id
- variables:
- description: 'Set individual variables: keys are variable names and
- values are templates. Any value returned by the
- L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
- can be used.'
- type: dict
-'''
+ - The config profile to use in config file.
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is
+ not defined.
+ type: string
+ version_added: 4.4.0
+ oauth_token:
+ description:
+ - Scaleway OAuth token.
+ - If not explicitly defined or in environment variables, it tries to lookup in the C(scaleway-cli) configuration file
+ (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
+ - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
+ type: string
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+ description: List of preference about what to use as an hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway
+ API, https://developer.scaleway.com/#servers-server-get) can be used.'
+ type: dict
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# scaleway_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
+---
# use hostname as inventory_hostname
# use the private IP address to connect to the host
plugin: community.general.scaleway
@@ -91,6 +90,7 @@ variables:
ansible_host: private_ip
state: state
+---
# use hostname as inventory_hostname and public IP address to connect to the host
plugin: community.general.scaleway
hostnames:
@@ -100,6 +100,7 @@ regions:
variables:
ansible_host: public_ip.address
+---
# Using static strings as variables
plugin: community.general.scaleway
hostnames:
@@ -108,7 +109,7 @@ variables:
ansible_host: public_ip.address
ansible_connection: "'ssh'"
ansible_user: "'admin'"
-'''
+"""
import os
import json
@@ -125,7 +126,7 @@ from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
from ansible.module_utils.urls import open_url
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import raise_from
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
@@ -140,7 +141,7 @@ def _fetch_information(token, url):
headers={'X-Auth-Token': token,
'Content-type': 'application/json'})
except Exception as e:
- raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
+ raise AnsibleError(f"Error while fetching {url}: {e}")
try:
raw_json = json.loads(to_text(response.read()))
except ValueError:
@@ -161,7 +162,7 @@ def _fetch_information(token, url):
def _build_server_url(api_endpoint):
- return "/".join([api_endpoint, "servers"])
+ return f"{api_endpoint}/servers"
def extract_public_ipv4(server_info):
diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py
deleted file mode 100644
index 8508b4e797..0000000000
--- a/plugins/inventory/stackpath_compute.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020 Shay Rybak
-# Copyright (c) 2020 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: stackpath_compute
- short_description: StackPath Edge Computing inventory source
- version_added: 1.2.0
- author:
- - UNKNOWN (@shayrybak)
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- description:
- - Get inventory hosts from StackPath Edge Computing.
- - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
- options:
- plugin:
- description:
- - A token that ensures this is a source file for the plugin.
- required: true
- type: string
- choices: ['community.general.stackpath_compute']
- client_id:
- description:
- - An OAuth client ID generated from the API Management section of the StackPath customer portal
- U(https://control.stackpath.net/api-management).
- required: true
- type: str
- client_secret:
- description:
- - An OAuth client secret generated from the API Management section of the StackPath customer portal
- U(https://control.stackpath.net/api-management).
- required: true
- type: str
- stack_slugs:
- description:
- - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account.
- type: list
- elements: str
- use_internal_ip:
- description:
- - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise.
- - If an instance doesn't have an external IP it will not be returned when this option is set to false.
- type: bool
-'''
-
-EXAMPLES = '''
-# Example using credentials to fetch all workload instances in a stack.
----
-plugin: community.general.stackpath_compute
-client_id: my_client_id
-client_secret: my_client_secret
-stack_slugs:
-- my_first_stack_slug
-- my_other_stack_slug
-use_internal_ip: false
-'''
-
-import traceback
-import json
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.urls import open_url
-from ansible.plugins.inventory import (
- BaseInventoryPlugin,
- Constructable,
- Cacheable
-)
-from ansible.utils.display import Display
-
-from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
-
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'community.general.stackpath_compute'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- # credentials
- self.client_id = None
- self.client_secret = None
- self.stack_slug = None
- self.api_host = "https://gateway.stackpath.com"
- self.group_keys = [
- "stackSlug",
- "workloadId",
- "cityCode",
- "countryCode",
- "continent",
- "target",
- "name",
- "workloadSlug"
- ]
-
- def _validate_config(self, config):
- if config['plugin'] != 'community.general.stackpath_compute':
- raise AnsibleError("plugin doesn't match this plugin")
- try:
- client_id = config['client_id']
- if len(client_id) != 32:
- raise AnsibleError("client_id must be 32 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- try:
- client_secret = config['client_secret']
- if len(client_secret) != 64:
- raise AnsibleError("client_secret must be 64 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- return True
-
- def _set_credentials(self):
- '''
- :param config_data: contents of the inventory config file
- '''
- self.client_id = self.get_option('client_id')
- self.client_secret = self.get_option('client_secret')
-
- def _authenticate(self):
- payload = json.dumps(
- {
- "client_id": self.client_id,
- "client_secret": self.client_secret,
- "grant_type": "client_credentials",
- }
- )
- headers = {
- "Content-Type": "application/json",
- }
- resp = open_url(
- self.api_host + '/identity/v1/oauth2/token',
- headers=headers,
- data=payload,
- method="POST"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- self.auth_token = json.loads(body)["access_token"]
-
- def _query(self):
- results = []
- workloads = []
- self._authenticate()
- for stack_slug in self.stack_slugs:
- try:
- workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
- except Exception:
- raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
- for workload in workloads:
- try:
- workload_instances = self._stackpath_query_get_list(
- self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
- )
- except Exception:
- raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
- for instance in workload_instances:
- if instance["phase"] == "RUNNING":
- instance["stackSlug"] = stack_slug
- instance["workloadId"] = workload["id"]
- instance["workloadSlug"] = workload["slug"]
- instance["cityCode"] = instance["location"]["cityCode"]
- instance["countryCode"] = instance["location"]["countryCode"]
- instance["continent"] = instance["location"]["continent"]
- instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
- try:
- if instance[self.hostname_key]:
- results.append(instance)
- except KeyError:
- pass
- return results
-
- def _populate(self, instances):
- for instance in instances:
- for group_key in self.group_keys:
- group = group_key + "_" + instance[group_key]
- group = group.lower().replace(" ", "_").replace("-", "_")
- self.inventory.add_group(group)
- self.inventory.add_host(instance[self.hostname_key],
- group=group)
-
- def _stackpath_query_get_list(self, url):
- self._authenticate()
- headers = {
- "Content-Type": "application/json",
- "Authorization": "Bearer " + self.auth_token,
- }
- next_page = True
- result = []
- cursor = '-1'
- while next_page:
- resp = open_url(
- url + '?page_request.first=10&page_request.after=%s' % cursor,
- headers=headers,
- method="GET"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- body_json = json.loads(body)
- result.extend(body_json["results"])
- next_page = body_json["pageInfo"]["hasNextPage"]
- if next_page:
- cursor = body_json["pageInfo"]["endCursor"]
- return result
-
- def _get_stack_slugs(self, stacks):
- self.stack_slugs = [stack["slug"] for stack in stacks]
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
- return True
- display.debug(
- "stackpath_compute inventory filename must end with \
- 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
- )
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- config = self._read_config_data(path)
- self._validate_config(config)
- self._set_credentials()
-
- # get user specifications
- self.use_internal_ip = self.get_option('use_internal_ip')
- if self.use_internal_ip:
- self.hostname_key = "ipAddress"
- else:
- self.hostname_key = "externalIpAddress"
-
- self.stack_slugs = self.get_option('stack_slugs')
- if not self.stack_slugs:
- try:
- stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
- self._get_stack_slugs(stacks)
- except Exception:
- raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
-
- if not cache or cache_needs_update:
- results = self._query()
-
- self._populate(make_unsafe(results))
-
- # If the cache has expired/doesn't exist or
- # if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- try:
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = results
- except Exception:
- raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py
index d48c294fd9..2eb52a617c 100644
--- a/plugins/inventory/virtualbox.py
+++ b/plugins/inventory/virtualbox.py
@@ -3,80 +3,81 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: virtualbox
- short_description: virtualbox inventory source
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: virtualbox
+short_description: Virtualbox inventory source
+description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter.
+ - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation
+ for details.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin.
+ type: string
+ required: true
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: Toggles showing all VMs instead of only those currently running.
+ type: boolean
+ default: false
+ settings_password_file:
+ description: Provide a file containing the settings password (equivalent to C(--settingspwfile)).
+ type: string
+ network_info_path:
+ description: Property path to query for network information (C(ansible_host)).
+ type: string
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: Create vars from virtualbox properties.
+ type: dictionary
+ default: {}
+ enable_advanced_group_parsing:
description:
- - Get inventory hosts from the local virtualbox installation.
- - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
- - The inventory_hostname is always the 'Name' of the virtualbox instance.
- - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter.
- - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation for details.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: token that ensures this is a source file for the 'virtualbox' plugin
- type: string
- required: true
- choices: ['virtualbox', 'community.general.virtualbox']
- running_only:
- description: toggles showing all vms vs only those currently running
- type: boolean
- default: false
- settings_password_file:
- description: provide a file containing the settings password (equivalent to --settingspwfile)
- type: string
- network_info_path:
- description: property path to query for network information (ansible_host)
- type: string
- default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
- query:
- description: create vars from virtualbox properties
- type: dictionary
- default: {}
- enable_advanced_group_parsing:
- description:
- - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based on the V(/) character and
- assign the resulting list elements as an Ansible Group.
- - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups according to
- U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups).
- Groups are now split using the V(,) character, and the V(/) character indicates nested groups.
- - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") will result in
- the group C(TestGroup2) being a child group of C(TestGroup); and
- the VM being a part of C(TestGroup2) and C(TestGroup3).
- default: false
- type: bool
- version_added: 9.2.0
-'''
+ - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based
+ on the V(/) character and assign the resulting list elements as an Ansible Group.
+ - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups
+ according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,)
+ character, and the V(/) character indicates nested groups.
+ - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3")
+ results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2)
+ and C(TestGroup3).
+ default: false
+ type: bool
+ version_added: 9.2.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# file must be named vbox.yaml or vbox.yml
-simple_config_file:
- plugin: community.general.virtualbox
- settings_password_file: /etc/virtulbox/secrets
- query:
- logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
- compose:
- ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+plugin: community.general.virtualbox
+settings_password_file: /etc/virtualbox/secrets
+query:
+ logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
+compose:
+ ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+---
# add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory'
plugin: community.general.virtualbox
groups:
container: "'minis' in (inventory_hostname)"
-'''
+"""
import os
from subprocess import Popen, PIPE
from ansible.errors import AnsibleParserError
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
@@ -203,7 +204,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
else:
# found vars, accumulate in hostvars for clean inventory set
- pref_k = make_unsafe('vbox_' + k.strip().replace(' ', '_'))
+ pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}")
leading_spaces = len(k) - len(k.lstrip(' '))
if 0 < leading_spaces <= 2:
if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
@@ -257,7 +258,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results):
'''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.'''
# Per the VirtualBox documentation, a VM can be part of many groups,
- # and it's possible to have nested groups.
+ # and it is possible to have nested groups.
# Many groups are separated by commas ",", and nested groups use
# slash "/".
# https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups
@@ -352,7 +353,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
p = Popen(cmd, stdout=PIPE)
except Exception as e:
- raise AnsibleParserError(to_native(e))
+ raise AnsibleParserError(str(e))
source_data = p.stdout.read().splitlines()
diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py
index 4094af2468..e6d828845a 100644
--- a/plugins/inventory/xen_orchestra.py
+++ b/plugins/inventory/xen_orchestra.py
@@ -3,65 +3,83 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: xen_orchestra
- short_description: Xen Orchestra inventory source
- version_added: 4.1.0
- author:
- - Dom Del Nano (@ddelnano)
- - Samori Gorse (@shinuza)
- requirements:
- - websocket-client >= 1.0.0
+DOCUMENTATION = r"""
+name: xen_orchestra
+short_description: Xen Orchestra inventory source
+version_added: 4.1.0
+author:
+ - Dom Del Nano (@ddelnano)
+ - Samori Gorse (@shinuza)
+requirements:
+ - websocket-client >= 1.0.0
+description:
+ - Get inventory hosts from a Xen Orchestra deployment.
+ - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to
+ recognize it as its own.
+ required: true
+ choices: ['community.general.xen_orchestra']
+ type: str
+ api_host:
description:
- - Get inventory hosts from a Xen Orchestra deployment.
- - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to recognize it as its own.
- required: true
- choices: ['community.general.xen_orchestra']
- type: str
- api_host:
- description:
- - API host to XOA API.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) will be used instead.
- type: str
- env:
- - name: ANSIBLE_XO_HOST
- user:
- description:
- - Xen Orchestra user.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) will be used instead.
- required: true
- type: str
- env:
- - name: ANSIBLE_XO_USER
- password:
- description:
- - Xen Orchestra password.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) will be used instead.
- required: true
- type: str
- env:
- - name: ANSIBLE_XO_PASSWORD
- validate_certs:
- description: Verify TLS certificate if using HTTPS.
- type: boolean
- default: true
- use_ssl:
- description: Use wss when connecting to the Xen Orchestra API
- type: boolean
- default: true
-'''
+ - API host to XOA API.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST)
+ is used instead.
+ type: str
+ env:
+ - name: ANSIBLE_XO_HOST
+ user:
+ description:
+ - Xen Orchestra user.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_USER
+ password:
+ description:
+ - Xen Orchestra password.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_PASSWORD
+ validate_certs:
+ description: Verify TLS certificate if using HTTPS.
+ type: boolean
+ default: true
+ use_ssl:
+ description: Use wss when connecting to the Xen Orchestra API.
+ type: boolean
+ default: true
+ use_vm_uuid:
+ description:
+ - Import Xen VMs to inventory using their UUID as the VM entry name.
+ - If set to V(false) use VM name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+ use_host_uuid:
+ description:
+ - Import Xen Hosts to inventory using their UUID as the Host entry name.
+ - If set to V(false) use Host name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# file must be named xen_orchestra.yaml or xen_orchestra.yml
plugin: community.general.xen_orchestra
api_host: 192.168.1.255
@@ -70,11 +88,12 @@ password: xo_pwd
validate_certs: true
use_ssl: true
groups:
- kube_nodes: "'kube_node' in tags"
+ kube_nodes: "'kube_node' in tags"
compose:
- ansible_port: 2222
-
-'''
+ ansible_port: 2222
+use_vm_uuid: false
+use_host_uuid: true
+"""
import json
import ssl
@@ -138,7 +157,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
self.conn = create_connection(
- '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
+ f'{proto}://{xoa_api_host}/api/', sslopt=sslopt)
CALL_TIMEOUT = 100
"""Number of 1/10ths of a second to wait before method call times out."""
@@ -162,8 +181,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
sleep(0.1)
waited += 1
- raise AnsibleError(
- 'Method call {method} timed out after {timeout} seconds.'.format(method=method, timeout=self.CALL_TIMEOUT / 10))
+ raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.')
def login(self, user, password):
result = self.call('session.signIn', {
@@ -171,15 +189,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
})
if 'error' in result:
- raise AnsibleError(
- 'Could not connect: {0}'.format(result['error']))
+ raise AnsibleError(f"Could not connect: {result['error']}")
def get_object(self, name):
answer = self.call('xo.getAllObjects', {'filter': {'type': name}})
if 'error' in answer:
- raise AnsibleError(
- 'Could not request: {0}'.format(answer['error']))
+ raise AnsibleError(f"Could not request: {answer['error']}")
return answer['result']
@@ -200,10 +216,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
def _add_vms(self, vms, hosts, pools):
+ vm_name_list = []
for uuid, vm in vms.items():
+ if self.vm_entry_name_type == 'name_label':
+ if vm['name_label'] not in vm_name_list:
+ entry_name = vm['name_label']
+ vm_name_list.append(vm['name_label'])
+ else:
+ vm_duplicate_count = vm_name_list.count(vm['name_label'])
+ entry_name = f"{vm['name_label']}_{vm_duplicate_count}"
+ vm_name_list.append(vm['name_label'])
+ else:
+ entry_name = uuid
group = 'with_ip'
ip = vm.get('mainIpAddress')
- entry_name = uuid
power_state = vm['power_state'].lower()
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
@@ -250,10 +276,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
def _add_hosts(self, hosts, pools):
+ host_name_list = []
for host in hosts.values():
- entry_name = host['uuid']
- group_name = 'xo_host_{0}'.format(
- clean_group_name(host['name_label']))
+ if self.host_entry_name_type == 'name_label':
+ if host['name_label'] not in host_name_list:
+ entry_name = host['name_label']
+ host_name_list.append(host['name_label'])
+ else:
+ host_duplicate_count = host_name_list.count(host['name_label'])
+ entry_name = f"{host['name_label']}_{host_duplicate_count}"
+ host_name_list.append(host['name_label'])
+ else:
+ entry_name = host['uuid']
+
+ group_name = f"xo_host_{clean_group_name(host['name_label'])}"
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
self.inventory.add_group(group_name)
@@ -276,15 +312,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
entry_name, 'product_brand', host['productBrand'])
for pool in pools.values():
- group_name = 'xo_pool_{0}'.format(
- clean_group_name(pool['name_label']))
+ group_name = f"xo_pool_{clean_group_name(pool['name_label'])}"
self.inventory.add_group(group_name)
def _add_pools(self, pools):
for pool in pools.values():
- group_name = 'xo_pool_{0}'.format(
- clean_group_name(pool['name_label']))
+ group_name = f"xo_pool_{clean_group_name(pool['name_label'])}"
self.inventory.add_group(group_name)
@@ -292,16 +326,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _pool_group_name_for_uuid(self, pools, pool_uuid):
for pool in pools:
if pool == pool_uuid:
- return 'xo_pool_{0}'.format(
- clean_group_name(pools[pool_uuid]['name_label']))
+ return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}"
# TODO: Refactor
def _host_group_name_for_uuid(self, hosts, host_uuid):
for host in hosts:
if host == host_uuid:
- return 'xo_host_{0}'.format(
- clean_group_name(hosts[host_uuid]['name_label']
- ))
+ return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}"
def _populate(self, objects):
# Prepare general groups
@@ -347,5 +378,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if not self.get_option('use_ssl'):
self.protocol = 'ws'
+ self.vm_entry_name_type = 'uuid'
+ if not self.get_option('use_vm_uuid'):
+ self.vm_entry_name_type = 'name_label'
+
+ self.host_entry_name_type = 'uuid'
+ if not self.get_option('use_host_uuid'):
+ self.host_entry_name_type = 'name_label'
+
objects = self._get_objects()
self._populate(make_unsafe(objects))
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
index 5e31cc6f89..7d65792b7f 100644
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -5,52 +5,65 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- name: bitwarden
- author:
- - Jonathan Lung (@lungj)
- requirements:
- - bw (command line utility)
- - be logged into bitwarden
- - bitwarden vault unlocked
- - E(BW_SESSION) environment variable set
- short_description: Retrieve secrets from Bitwarden
- version_added: 5.4.0
+DOCUMENTATION = r"""
+name: bitwarden
+author:
+ - Jonathan Lung (@lungj)
+requirements:
+ - bw (command line utility)
+ - be logged into bitwarden
+ - bitwarden vault unlocked
+ - E(BW_SESSION) environment variable set
+short_description: Retrieve secrets from Bitwarden
+version_added: 5.4.0
+description:
+ - Retrieve secrets from Bitwarden.
+options:
+ _terms:
+ description: Key(s) to fetch values for from login info.
+ required: true
+ type: list
+ elements: str
+ search:
description:
- - Retrieve secrets from Bitwarden.
- options:
- _terms:
- description: Key(s) to fetch values for from login info.
- required: true
- type: list
- elements: str
- search:
- description:
- - Field to retrieve, for example V(name) or V(id).
- - If set to V(id), only zero or one element can be returned.
- Use the Jinja C(first) filter to get the only list element.
- - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields.
- type: str
- default: name
- version_added: 5.7.0
- field:
- description: Field to fetch. Leave unset to fetch whole response.
- type: str
- collection_id:
- description: Collection ID to filter results by collection. Leave unset to skip filtering.
- type: str
- version_added: 6.3.0
- organization_id:
- description: Organization ID to filter results by organization. Leave unset to skip filtering.
- type: str
- version_added: 8.5.0
- bw_session:
- description: Pass session key instead of reading from env.
- type: str
- version_added: 8.4.0
+ - Field to retrieve, for example V(name) or V(id).
+ - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element.
+ - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields.
+ type: str
+ default: name
+ version_added: 5.7.0
+ field:
+ description: Field to fetch. Leave unset to fetch whole response.
+ type: str
+ collection_id:
+ description:
+ - Collection ID to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 6.3.0
+ collection_name:
+ description:
+ - Collection name to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 10.4.0
+ organization_id:
+ description: Organization ID to filter results by organization. Leave unset to skip filtering.
+ type: str
+ version_added: 8.5.0
+ bw_session:
+ description: Pass session key instead of reading from env.
+ type: str
+ version_added: 8.4.0
+ result_count:
+ description:
+ - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number
+ of query results. Leave empty to skip this check.
+ type: int
+ version_added: 10.4.0
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: "Get 'password' from all Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
@@ -85,21 +98,31 @@ EXAMPLES = """
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
+
+- name: "Get all Bitwarden records from collection"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }}
+
+- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }}
"""
-RETURN = """
- _raw:
- description:
- - A one-element list that contains a list of requested fields or JSON objects of matches.
- - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true),
- this always gets reduced to a list of field values or JSON objects.
- type: list
- elements: list
+RETURN = r"""
+_raw:
+ description:
+ - A one-element list that contains a list of requested fields or JSON objects of matches.
+ - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced
+ to a list of field values or JSON objects.
+ type: list
+ elements: list
"""
from subprocess import Popen, PIPE
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.ajson import AnsibleJSONDecoder
from ansible.plugins.lookup import LookupBase
@@ -207,10 +230,28 @@ class Bitwarden(object):
continue
if matches and not field_matches:
- raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+ raise AnsibleError(f"field {field} does not exist in {search_value}")
return field_matches
+ def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]:
+ """Return matching IDs of collections whose name is equal to collection_name."""
+
+ # Prepare set of params for Bitwarden CLI
+ params = ['list', 'collections', '--search', collection_name]
+
+ if organization_id:
+ params.extend(['--organizationid', organization_id])
+
+ out, err = self._run(params)
+
+ # This includes things that matched in different fields.
+ initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
+
+ # Filter to only return the ID of a collections with exactly matching name
+ return [item['id'] for item in initial_matches
+ if str(item.get('name')).lower() == collection_name.lower()]
+
class LookupModule(LookupBase):
@@ -219,7 +260,9 @@ class LookupModule(LookupBase):
field = self.get_option('field')
search_field = self.get_option('search')
collection_id = self.get_option('collection_id')
+ collection_name = self.get_option('collection_name')
organization_id = self.get_option('organization_id')
+ result_count = self.get_option('result_count')
_bitwarden.session = self.get_option('bw_session')
if not _bitwarden.unlocked:
@@ -228,7 +271,27 @@ class LookupModule(LookupBase):
if not terms:
terms = [None]
- return [_bitwarden.get_field(field, term, search_field, collection_id, organization_id) for term in terms]
+ if collection_name and collection_id:
+ raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!")
+ elif collection_name:
+ collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id)
+ if not collection_ids:
+ raise BitwardenException("No matching collections found!")
+ else:
+ collection_ids = [collection_id]
+
+ results = [
+ _bitwarden.get_field(field, term, search_field, collection_id, organization_id)
+ for collection_id in collection_ids
+ for term in terms
+ ]
+
+ for result in results:
+ if result_count is not None and len(result) != result_count:
+ raise BitwardenException(
+ f"Number of results doesn't match result_count! ({len(result)} != {result_count})")
+
+ return results
_bitwarden = Bitwarden()
diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py
index 3d08067105..431384c079 100644
--- a/plugins/lookup/bitwarden_secrets_manager.py
+++ b/plugins/lookup/bitwarden_secrets_manager.py
@@ -6,31 +6,31 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- name: bitwarden_secrets_manager
- author:
- - jantari (@jantari)
- requirements:
- - bws (command line utility)
- short_description: Retrieve secrets from Bitwarden Secrets Manager
- version_added: 7.2.0
- description:
- - Retrieve secrets from Bitwarden Secrets Manager.
- options:
- _terms:
- description: Secret ID(s) to fetch values for.
- required: true
- type: list
- elements: str
- bws_access_token:
- description: The BWS access token to use for this lookup.
- env:
- - name: BWS_ACCESS_TOKEN
- required: true
- type: str
+DOCUMENTATION = r"""
+name: bitwarden_secrets_manager
+author:
+ - jantari (@jantari)
+requirements:
+ - bws (command line utility)
+short_description: Retrieve secrets from Bitwarden Secrets Manager
+version_added: 7.2.0
+description:
+ - Retrieve secrets from Bitwarden Secrets Manager.
+options:
+ _terms:
+ description: Secret ID(s) to fetch values for.
+ required: true
+ type: list
+ elements: str
+ bws_access_token:
+ description: The BWS access token to use for this lookup.
+ env:
+ - name: BWS_ACCESS_TOKEN
+ required: true
+ type: str
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication
ansible.builtin.debug:
msg: >-
@@ -62,11 +62,11 @@ EXAMPLES = """
{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }}
"""
-RETURN = """
- _raw:
- description: List containing one or more secrets.
- type: list
- elements: dict
+RETURN = r"""
+_raw:
+ description: List containing one or more secrets.
+ type: list
+ elements: dict
"""
from subprocess import Popen, PIPE
diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py
index d63f3943b0..f2ad576907 100644
--- a/plugins/lookup/cartesian.py
+++ b/plugins/lookup/cartesian.py
@@ -6,24 +6,24 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cartesian
- short_description: returns the cartesian product of lists
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cartesian
+short_description: Returns the cartesian product of lists
+description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+ - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ - You can see the exact syntax in the examples section.
+options:
+ _terms:
description:
- - Takes the input lists and returns a list that represents the product of the input lists.
- - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
- You can see the exact syntax in the examples section.
- options:
- _terms:
- description:
- - a set of lists
- type: list
- elements: list
- required: true
-'''
+ - A set of lists.
+ type: list
+ elements: list
+ required: true
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Example of the change in the description
ansible.builtin.debug:
msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
@@ -34,15 +34,15 @@ EXAMPLES = """
with_community.general.cartesian:
- "{{list1}}"
- "{{list2}}"
- - [1,2,3,4,5,6]
+ - [1, 2, 3, 4, 5, 6]
"""
-RETURN = """
- _list:
- description:
- - list of lists composed of elements of the input lists
- type: list
- elements: list
+RETURN = r"""
+_list:
+ description:
+ - List of lists composed of elements of the input lists.
+ type: list
+ elements: list
"""
from itertools import product
@@ -66,13 +66,7 @@ class LookupModule(LookupBase):
"""
results = []
for x in terms:
- try:
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
- except TypeError:
- # The loader argument is deprecated in ansible-core 2.14+. Fall back to
- # pre-2.14 behavior for older ansible-core versions.
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
- results.append(intermediate)
+ results.append(listify_lookup_plugin_terms(x, templar=self._templar))
return results
def run(self, terms, variables=None, **kwargs):
diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py
index a116b21e5f..8fe53744ee 100644
--- a/plugins/lookup/chef_databag.py
+++ b/plugins/lookup/chef_databag.py
@@ -6,42 +6,41 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: chef_databag
- short_description: fetches data from a Chef Databag
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: chef_databag
+short_description: Fetches data from a Chef Databag
+description:
+ - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server
+ API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the
+ given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base
+ path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).'
+requirements:
+ - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
+options:
+ name:
description:
- - "This is a lookup plugin to provide access to chef data bags using the pychef package.
- It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from,
- starting from either the given base path or the current working directory.
- The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration
- file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
- requirements:
- - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
- options:
- name:
- description:
- - Name of the databag
- type: string
- required: true
- item:
- description:
- - Item to fetch
- type: string
- required: true
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+ - Name of the databag.
+ type: string
+ required: true
+ item:
+ description:
+ - Item to fetch.
+ type: string
+ required: true
"""
-RETURN = """
- _raw:
- description:
- - The value from the databag.
- type: list
- elements: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
"""
from ansible.errors import AnsibleError
@@ -81,11 +80,11 @@ class LookupModule(LookupBase):
setattr(self, arg, parsed)
except ValueError:
raise AnsibleError(
- "can't parse arg {0}={1} as string".format(arg, arg_raw)
+ f"can't parse arg {arg}={arg_raw} as string"
)
if args:
raise AnsibleError(
- "unrecognized arguments to with_sequence: %r" % list(args.keys())
+ f"unrecognized arguments to with_sequence: {list(args.keys())!r}"
)
def run(self, terms, variables=None, **kwargs):
diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py
index 0f93c03c26..142c516df5 100644
--- a/plugins/lookup/collection_version.py
+++ b/plugins/lookup/collection_version.py
@@ -5,18 +5,17 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: collection_version
author: Felix Fontein (@felixfontein)
version_added: "4.0.0"
short_description: Retrieves the version of an installed collection
description:
- - This lookup allows to query the version of an installed collection, and to determine whether a
- collection is installed at all.
- - By default it returns V(none) for non-existing collections and V(*) for collections without a
- version number. The latter should only happen in development environments, or when installing
- a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
- by providing other values with O(result_not_found) and O(result_no_version).
+ - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed
+ at all.
+ - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter
+ should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml).
+ This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version).
options:
_terms:
description:
@@ -34,30 +33,27 @@ options:
result_no_version:
description:
- The value to return when the collection has no version number.
- - This can happen for collections installed from git which do not have a version number
- in C(galaxy.yml).
+ - This can happen for collections installed from git which do not have a version number in C(galaxy.yml).
- By default, V(*) is returned.
type: string
default: '*'
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Check version of community.general
ansible.builtin.debug:
msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
"""
-RETURN = """
- _raw:
- description:
- - The version number of the collections listed as input.
- - If a collection can not be found, it will return the value provided in O(result_not_found).
- By default, this is V(none).
- - If a collection can be found, but the version not identified, it will return the value provided in
- O(result_no_version). By default, this is V(*). This can happen for collections installed
- from git which do not have a version number in V(galaxy.yml).
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description:
+ - The version number of the collections listed as input.
+ - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none).
+ - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version).
+ By default, this is V(*). This can happen for collections installed from git which do not have a version number in V(galaxy.yml).
+ type: list
+ elements: str
"""
import json
@@ -115,10 +111,10 @@ class LookupModule(LookupBase):
for term in terms:
if not FQCN_RE.match(term):
- raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term))
+ raise AnsibleLookupError(f'"{term}" is not a FQCN')
try:
- collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term))
+ collection_pkg = import_module(f'ansible_collections.{term}')
except ImportError:
# Collection not found
result.append(not_found)
@@ -127,7 +123,7 @@ class LookupModule(LookupBase):
try:
data = load_collection_meta(collection_pkg, no_version=no_version)
except Exception as exc:
- raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc))
+ raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}')
result.append(data.get('version', no_version))
diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py
index 79eb65edb1..f57b3da891 100644
--- a/plugins/lookup/consul_kv.py
+++ b/plugins/lookup/consul_kv.py
@@ -7,109 +7,109 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: consul_kv
- short_description: Fetch metadata from a Consul key value store.
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: consul_kv
+short_description: Fetch metadata from a Consul key value store
+description:
+ - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store
+ with simple rest commands.
+ - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata).
+requirements:
+ - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+options:
+ _raw:
+ description: List of key(s) to retrieve.
+ type: list
+ elements: string
+ recurse:
+ type: boolean
+ description: If V(true), retrieves all the values that have the given key as prefix.
+ default: false
+ index:
description:
- - Lookup metadata for a playbook from the key value store in a Consul cluster.
- Values can be easily set in the kv store with simple rest commands
- - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
- requirements:
- - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
- options:
- _raw:
- description: List of key(s) to retrieve.
- type: list
- elements: string
- recurse:
- type: boolean
- description: If true, will retrieve all the values that have the given key as prefix.
- default: false
- index:
- description:
- - If the key has a value with the specified index then this is returned allowing access to historical values.
- type: int
- datacenter:
- description:
- - Retrieve the key from a consul datacenter other than the default for the consul host.
- type: str
- token:
- description: The acl token to allow access to restricted values.
- type: str
- host:
- default: localhost
- type: str
- description:
- - The target to connect to, must be a resolvable address.
- - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set.
- ini:
- - section: lookup_consul
- key: host
- port:
- description:
- - The port of the target host to connect to.
- - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
- type: int
- default: 8500
- scheme:
- default: http
- type: str
- description:
- - Whether to use http or https.
- - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
- validate_certs:
- default: true
- description: Whether to verify the TLS connection or not.
- type: bool
- env:
- - name: ANSIBLE_CONSUL_VALIDATE_CERTS
- ini:
- - section: lookup_consul
- key: validate_certs
- client_cert:
- description: The client cert to verify the TLS connection.
- type: str
- env:
- - name: ANSIBLE_CONSUL_CLIENT_CERT
- ini:
- - section: lookup_consul
- key: client_cert
- url:
- description:
- - The target to connect to.
- - "Should look like this: V(https://my.consul.server:8500)."
- type: str
- version_added: 1.0.0
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: url
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to/retrieve'
-
- - name: Parameters can be provided after the key be more specific about what to retrieve
- ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
- - name: retrieving a KV from a remote cluster on non default port
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+ - If the key has a value with the specified index then this is returned allowing access to historical values.
+ type: int
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ type: str
+ token:
+ description: The acl token to allow access to restricted values.
+ type: str
+ host:
+ default: localhost
+ type: str
+ description:
+ - The target to connect to, must be a resolvable address.
+ - It is determined from E(ANSIBLE_CONSUL_URL) if that is set.
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ type: int
+ default: 8500
+ scheme:
+ default: http
+ type: str
+ description:
+ - Whether to use http or https.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ validate_certs:
+ default: true
+ description: Whether to verify the TLS connection or not.
+ type: bool
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the TLS connection.
+ type: str
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description:
+ - The target to connect to.
+ - 'Should look like this: V(https://my.consul.server:8500).'
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
"""
-RETURN = """
- _raw:
- description:
- - Value(s) stored in consul.
- type: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+- name: Parameters can be provided after the key be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+- name: retrieving a KV from a remote cluster on non default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
"""
from ansible.module_utils.six.moves.urllib.parse import urlparse
@@ -171,7 +171,7 @@ class LookupModule(LookupBase):
values.append(to_text(results[1]['Value']))
except Exception as e:
raise AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
+ f"Error locating '{term}' in kv store. Error was {e}")
return values
@@ -192,7 +192,7 @@ class LookupModule(LookupBase):
if param and len(param) > 0:
name, value = param.split('=')
if name not in paramvals:
- raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+ raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter")
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py
index fd284f55c8..a170b13d03 100644
--- a/plugins/lookup/credstash.py
+++ b/plugins/lookup/credstash.py
@@ -6,54 +6,54 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: credstash
- short_description: retrieve secrets from Credstash on AWS
- requirements:
- - credstash (python library)
- description:
- - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
- options:
- _terms:
- description: term or list of terms to lookup in the credit store
- type: list
- elements: string
- required: true
- table:
- description: name of the credstash table to query
- type: str
- default: 'credential-store'
- version:
- description: Credstash version
- type: str
- default: ''
- region:
- description: AWS region
- type: str
- profile_name:
- description: AWS profile to use for authentication
- type: str
- env:
- - name: AWS_PROFILE
- aws_access_key_id:
- description: AWS access key ID
- type: str
- env:
- - name: AWS_ACCESS_KEY_ID
- aws_secret_access_key:
- description: AWS access key
- type: str
- env:
- - name: AWS_SECRET_ACCESS_KEY
- aws_session_token:
- description: AWS session token
- type: str
- env:
- - name: AWS_SESSION_TOKEN
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: credstash
+short_description: Retrieve secrets from Credstash on AWS
+requirements:
+ - credstash (python library)
+description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash."
+options:
+ _terms:
+ description: Term or list of terms to lookup in the credit store.
+ type: list
+ elements: string
+ required: true
+ table:
+ description: Name of the credstash table to query.
+ type: str
+ default: 'credential-store'
+ version:
+ description: Credstash version.
+ type: str
+ default: ''
+ region:
+ description: AWS region.
+ type: str
+ profile_name:
+ description: AWS profile to use for authentication.
+ type: str
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID.
+ type: str
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+ description: AWS access key.
+ type: str
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token.
+ type: str
+ env:
+ - name: AWS_SESSION_TOKEN
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: first use credstash to store your secrets
ansible.builtin.shell: credstash put my-github-password secure123
@@ -77,20 +77,20 @@ EXAMPLES = """
environment: production
tasks:
- - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
- - name: "Test credstash lookup plugin -- get the password with a context defined here"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
-RETURN = """
- _raw:
- description:
- - Value(s) stored in Credstash.
- type: str
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
"""
from ansible.errors import AnsibleError
@@ -137,8 +137,8 @@ class LookupModule(LookupBase):
try:
ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass))
except credstash.ItemNotFound:
- raise AnsibleError('Key {0} not found'.format(term))
+ raise AnsibleError(f'Key {term} not found')
except Exception as e:
- raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ raise AnsibleError(f'Encountered exception while fetching {term}: {e}')
return ret
diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py
index 6a08675b3b..63834dce9b 100644
--- a/plugins/lookup/cyberarkpassword.py
+++ b/plugins/lookup/cyberarkpassword.py
@@ -6,62 +6,64 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cyberarkpassword
- short_description: get secrets from CyberArk AIM
- requirements:
- - CyberArk AIM tool installed
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cyberarkpassword
+short_description: Get secrets from CyberArk AIM
+requirements:
+ - CyberArk AIM tool installed
+description:
+ - Get secrets from CyberArk AIM.
+options:
+ _command:
+ description: Cyberark CLI utility.
+ type: string
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ type: string
+ required: true
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ type: string
+ required: true
+ output:
description:
- - Get secrets from CyberArk AIM.
- options :
- _command:
- description: Cyberark CLI utility.
- type: string
- env:
- - name: AIM_CLIPASSWORDSDK_CMD
- default: '/opt/CARKaim/sdk/clipasswordsdk'
- appid:
- description: Defines the unique ID of the application that is issuing the password request.
- type: string
- required: true
- query:
- description: Describes the filter criteria for the password retrieval.
- type: string
- required: true
- output:
- description:
- - Specifies the desired output fields separated by commas.
- - "They could be: Password, PassProps., PasswordChangeInProcess"
- type: string
- default: 'password'
- _extra:
- description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
- notes:
- - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
-'''
-
-EXAMPLES = """
- - name: passing options to the lookup
- ansible.builtin.debug:
- msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
- vars:
- cyquery:
- appid: "app_ansible"
- query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
- output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
-
-
- - name: used in a loop
- ansible.builtin.debug:
- msg: "{{item}}"
- with_community.general.cyberarkpassword:
- appid: 'app_ansible'
- query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
- output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+ - Specifies the desired output fields separated by commas.
+ - 'They could be: Password, PassProps., PasswordChangeInProcess.'
+ type: string
+ default: 'password'
+ _extra:
+ description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and
+ ASCP Implementation Guide".
+notes:
+ - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o))
+ and change the location of C(CLIPasswordSDK.exe).
"""
-RETURN = """
+EXAMPLES = r"""
+- name: passing options to the lookup
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
+ vars:
+ cyquery:
+ appid: "app_ansible"
+ query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
+ output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
+
+
+- name: used in a loop
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cyberarkpassword:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+"""
+
+RETURN = r"""
_result:
description: A list containing one dictionary.
type: list
@@ -69,12 +71,12 @@ _result:
contains:
password:
description:
- - The actual value stored
+ - The actual value stored.
passprops:
- description: properties assigned to the entry
+ description: Properties assigned to the entry.
type: dictionary
passwordchangeinprocess:
- description: did the password change?
+ description: Did the password change?
"""
import os
@@ -84,7 +86,7 @@ from subprocess import Popen
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.utils.display import Display
display = Display()
@@ -105,7 +107,7 @@ class CyberarkPassword:
self.extra_parms = []
for key, value in kwargs.items():
self.extra_parms.append('-p')
- self.extra_parms.append("%s=%s" % (key, value))
+ self.extra_parms.append(f"{key}={value}")
if self.appid is None:
raise AnsibleError("CyberArk Error: No Application ID specified")
@@ -130,8 +132,8 @@ class CyberarkPassword:
all_parms = [
CLIPASSWORDSDK_CMD,
'GetPassword',
- '-p', 'AppDescs.AppID=%s' % self.appid,
- '-p', 'Query=%s' % self.query,
+ '-p', f'AppDescs.AppID={self.appid}',
+ '-p', f'Query={self.query}',
'-o', self.output,
'-d', self.b_delimiter]
all_parms.extend(self.extra_parms)
@@ -144,7 +146,7 @@ class CyberarkPassword:
b_credential = to_bytes(tmp_output)
if tmp_error:
- raise AnsibleError("ERROR => %s " % (tmp_error))
+ raise AnsibleError(f"ERROR => {tmp_error} ")
if b_credential and b_credential.endswith(b'\n'):
b_credential = b_credential[:-1]
@@ -164,7 +166,7 @@ class CyberarkPassword:
except subprocess.CalledProcessError as e:
raise AnsibleError(e.output)
except OSError as e:
- raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))
+ raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=({e.errno}) => {e.strerror} ")
return [result_dict]
@@ -177,11 +179,11 @@ class LookupModule(LookupBase):
"""
def run(self, terms, variables=None, **kwargs):
- display.vvvv("%s" % terms)
+ display.vvvv(f"{terms}")
if isinstance(terms, list):
return_values = []
for term in terms:
- display.vvvv("Term: %s" % term)
+ display.vvvv(f"Term: {term}")
cyberark_conn = CyberarkPassword(**term)
return_values.append(cyberark_conn.get())
return return_values
diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py
index fe7fb3c43a..18d2a000d9 100644
--- a/plugins/lookup/dependent.py
+++ b/plugins/lookup/dependent.py
@@ -6,31 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: dependent
short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables
author: Felix Fontein (@felixfontein)
version_added: 3.1.0
description:
- - "Takes the input lists and returns a list with elements that are lists, dictionaries,
- or template expressions which evaluate to lists or dicts, composed of the elements of
- the input evaluated lists and dictionaries."
+ - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate
+ to lists or dicts, composed of the elements of the input evaluated lists and dictionaries.
options:
_terms:
description:
- - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary.
- The name is the index that is used in the result object. The value is iterated over as described below.
+ - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name
+ is the index that is used in the result object. The value is iterated over as described below.
- If the value is a list, it is simply iterated over.
- - If the value is a dictionary, it is iterated over and returned as if they would be processed by the
- P(ansible.builtin.dict2items#filter) filter.
- - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen
- elements with C(item.). The result must be a list or a dictionary.
+ - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter)
+ filter.
+ - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with
+ C(item.). The result must be a list or a dictionary.
type: list
elements: dict
required: true
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Install/remove public keys for active admin users
ansible.posix.authorized_key:
user: "{{ item.admin.key }}"
@@ -76,9 +75,9 @@ EXAMPLES = """
loop_control:
# Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
label: |-
- {{ [item.zone.key, item.prefix.key, item.entry.key,
- item.entry.value.ttl | default(3600),
- item.entry.value.absent | default(False), item.entry.value.value] }}
+ {{ [item.zone.key, item.prefix.key, item.entry.key,
+ item.entry.value.ttl | default(3600),
+ item.entry.value.absent | default(False), item.entry.value.value] }}
with_community.general.dependent:
- zone: dns_setup
- prefix: item.zone.value
@@ -89,47 +88,44 @@ EXAMPLES = """
'':
A:
value:
- - 1.2.3.4
+ - 1.2.3.4
AAAA:
value:
- - "2a01:1:2:3::1"
+ - "2a01:1:2:3::1"
'test._domainkey':
TXT:
ttl: 300
value:
- - '"k=rsa; t=s; p=MIGfMA..."'
+ - '"k=rsa; t=s; p=MIGfMA..."'
example.org:
'www':
A:
value:
- - 1.2.3.4
- - 5.6.7.8
+ - 1.2.3.4
+ - 5.6.7.8
"""
-RETURN = """
- _list:
- description:
- - A list composed of dictionaries whose keys are the variable names from the input list.
- type: list
- elements: dict
- sample:
- - key1: a
- key2: test
- - key1: a
- key2: foo
- - key1: b
- key2: bar
+RETURN = r"""
+_list:
+ description:
+ - A list composed of dictionaries whose keys are the variable names from the input list.
+ type: list
+ elements: dict
+ sample:
+ - key1: a
+ key2: test
+ - key1: a
+ key2: foo
+ - key1: b
+ key2: bar
"""
from ansible.errors import AnsibleLookupError
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
-from ansible.release import __version__ as ansible_version
from ansible.template import Templar
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
try:
from ansible.template import trust_as_template as _trust_as_template
HAS_DATATAGGING = True
@@ -137,11 +133,6 @@ except ImportError:
HAS_DATATAGGING = False
-# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option.
-# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419)
-_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0')
-
-
def _make_safe(value):
if HAS_DATATAGGING and isinstance(value, str):
return _trust_as_template(value)
@@ -157,8 +148,6 @@ class LookupModule(LookupBase):
"""
templar.available_variables = variables or {}
quoted_expression = "{0}{1}{2}".format("{{", expression, "}}")
- if _TEMPLAR_HAS_TEMPLATE_CACHE:
- return templar.template(quoted_expression, cache=False)
if hasattr(templar, 'evaluate_expression'):
# This is available since the Data Tagging PR has been merged
return templar.evaluate_expression(_make_safe(expression))
@@ -188,8 +177,7 @@ class LookupModule(LookupBase):
values = self.__evaluate(expression, templar, variables=vars)
except Exception as e:
raise AnsibleLookupError(
- 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format(
- error=e, key=key, item=current))
+ f'Caught "{e}" while evaluating {key!r} with item == {current!r}')
if isinstance(values, Mapping):
for idx, val in sorted(values.items()):
@@ -201,8 +189,7 @@ class LookupModule(LookupBase):
self.__process(result, terms, index + 1, current, templar, variables)
else:
raise AnsibleLookupError(
- 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format(
- key=key, item=current, type=type(values)))
+ f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}')
def run(self, terms, variables=None, **kwargs):
"""Generate list."""
@@ -210,22 +197,23 @@ class LookupModule(LookupBase):
result = []
if len(terms) > 0:
- templar = Templar(loader=self._templar._loader)
+ if HAS_DATATAGGING:
+ templar = self._templar.copy_with_new_env(available_variables={})
+ else:
+ templar = Templar(loader=self._templar._loader)
data = []
vars_so_far = set()
for index, term in enumerate(terms):
if not isinstance(term, Mapping):
raise AnsibleLookupError(
- 'Parameter {index} must be a dictionary, got {type}'.format(
- index=index, type=type(term)))
+ f'Parameter {index} must be a dictionary, got {type(term)}')
if len(term) != 1:
raise AnsibleLookupError(
- 'Parameter {index} must be a one-element dictionary, got {count} elements'.format(
- index=index, count=len(term)))
+ f'Parameter {index} must be a one-element dictionary, got {len(term)} elements')
k, v = list(term.items())[0]
if k in vars_so_far:
raise AnsibleLookupError(
- 'The variable {key!r} appears more than once'.format(key=k))
+ f'The variable {k!r} appears more than once')
vars_so_far.add(k)
if isinstance(v, string_types):
data.append((k, v, None))
@@ -233,7 +221,6 @@ class LookupModule(LookupBase):
data.append((k, None, v))
else:
raise AnsibleLookupError(
- 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format(
- index=index, key=k, type=type(v)))
+ f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}')
self.__process(result, data, 0, {}, templar, variables)
return result
diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py
index a7768092c5..07fc287d71 100644
--- a/plugins/lookup/dig.py
+++ b/plugins/lookup/dig.py
@@ -6,89 +6,113 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: dig
- author: Jan-Piet Mens (@jpmens)
- short_description: query DNS using the dnspython library
- requirements:
- - dnspython (python library, http://www.dnspython.org/)
+DOCUMENTATION = r"""
+name: dig
+author: Jan-Piet Mens (@jpmens)
+short_description: Query DNS using the dnspython library
+requirements:
+ - dnspython (python library, http://www.dnspython.org/)
+description:
+ - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain
+ name). It is possible to lookup any DNS record in this manner.
+ - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+ It is also possible to explicitly specify the DNS server(s) to use for lookups.
+ - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN.
+ - In addition to (default) A record, it is also possible to specify a different record type that should be queried. This
+ can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to
+ the FQDN being queried.
+ - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In
+ such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
+ which results in the record values being returned as a list over which you can iterate later on.
+ - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to
+ explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to
+ be passed-in as an additional parameter to the lookup.
+options:
+ _terms:
+ description: Domain(s) to query.
+ type: list
+ elements: str
+ qtype:
description:
- - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
- It is possible to lookup any DNS record in this manner.
- - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
- It is also possible to explicitly specify the DNS server(s) to use for lookups.
- - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN
- - In addition to (default) A record, it is also possible to specify a different record type that should be queried.
- This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
- - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
- In such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
- which will result in the record values being returned as a list over which you can iterate later on.
- - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
- It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
- This needs to be passed-in as an additional parameter to the lookup
- options:
- _terms:
- description: Domain(s) to query.
- type: list
- elements: str
- qtype:
- description:
- - Record type to query.
- - V(DLV) has been removed in community.general 6.0.0.
- - V(CAA) has been added in community.general 6.3.0.
- type: str
- default: 'A'
- choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
- flat:
- description: If 0 each record is returned as a dictionary, otherwise a string.
- type: int
- default: 1
- retry_servfail:
- description: Retry a nameserver if it returns SERVFAIL.
- default: false
- type: bool
- version_added: 3.6.0
- fail_on_error:
- description:
- - Abort execution on lookup errors.
- - The default for this option will likely change to V(true) in the future.
- The current default, V(false), is used for backwards compatibility, and will result in empty strings
- or the string V(NXDOMAIN) in the result in case of errors.
- default: false
- type: bool
- version_added: 5.4.0
- real_empty:
- description:
- - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
- - The default for this option will likely change to V(true) in the future.
- - This option will be forced to V(true) if multiple domains to be queried are specified.
- default: false
- type: bool
- version_added: 6.0.0
- class:
- description:
- - "Class."
- type: str
- default: 'IN'
- tcp:
- description: Use TCP to lookup DNS records.
- default: false
- type: bool
- version_added: 7.5.0
- port:
- description: Use port as target port when looking up DNS records.
- default: 53
- type: int
- version_added: 9.5.0
- notes:
- - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
- - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
- - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
- Syntax for specifying the record type is shown in the examples below.
- - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
-'''
+ - Record type to query.
+ - V(DLV) has been removed in community.general 6.0.0.
+ - V(CAA) has been added in community.general 6.3.0.
+ type: str
+ default: 'A'
+ choices:
+ - A
+ - ALL
+ - AAAA
+ - CAA
+ - CNAME
+ - DNAME
+ - DNSKEY
+ - DS
+ - HINFO
+ - LOC
+ - MX
+ - NAPTR
+ - NS
+ - NSEC3PARAM
+ - PTR
+ - RP
+ - RRSIG
+ - SOA
+ - SPF
+ - SRV
+ - SSHFP
+ - TLSA
+ - TXT
+ flat:
+ description: If 0 each record is returned as a dictionary, otherwise a string.
+ type: int
+ default: 1
+ retry_servfail:
+ description: Retry a nameserver if it returns SERVFAIL.
+ default: false
+ type: bool
+ version_added: 3.6.0
+ fail_on_error:
+ description:
+ - Abort execution on lookup errors.
+ - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for
+ backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors.
+ default: false
+ type: bool
+ version_added: 5.4.0
+ real_empty:
+ description:
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option is likely to change to V(true) in the future.
+ - This option is forced to V(true) if multiple domains to be queried are specified.
+ default: false
+ type: bool
+ version_added: 6.0.0
+ class:
+ description:
+ - Class.
+ type: str
+ default: 'IN'
+ tcp:
+ description: Use TCP to lookup DNS records.
+ default: false
+ type: bool
+ version_added: 7.5.0
+ port:
+ description: Use port as target port when looking up DNS records.
+ default: 53
+ type: int
+ version_added: 9.5.0
+notes:
+ - V(ALL) is not a record in itself, merely the listed fields are available for any record results you retrieve in the form
+ of a dictionary.
+ - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary.
+ - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying
+ the record type is shown in the examples below.
+ - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Simple A record (IPV4 address) lookup for example.com
ansible.builtin.debug:
msg: "{{ lookup('community.general.dig', 'example.com.')}}"
@@ -139,88 +163,87 @@ EXAMPLES = """
msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}"
"""
-RETURN = """
- _list:
- description:
- - List of composed strings or dictionaries with key and value
- If a dictionary, fields shows the keys returned depending on query type
- type: list
- elements: raw
- contains:
- ALL:
- description:
- - owner, ttl, type
- A:
- description:
- - address
- AAAA:
- description:
- - address
- CAA:
- description:
- - flags
- - tag
- - value
- version_added: 6.3.0
- CNAME:
- description:
- - target
- DNAME:
- description:
- - target
- DNSKEY:
- description:
- - flags, algorithm, protocol, key
- DS:
- description:
- - algorithm, digest_type, key_tag, digest
- HINFO:
- description:
- - cpu, os
- LOC:
- description:
- - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
- MX:
- description:
- - preference, exchange
- NAPTR:
- description:
- - order, preference, flags, service, regexp, replacement
- NS:
- description:
- - target
- NSEC3PARAM:
- description:
- - algorithm, flags, iterations, salt
- PTR:
- description:
- - target
- RP:
- description:
- - mbox, txt
- SOA:
- description:
- - mname, rname, serial, refresh, retry, expire, minimum
- SPF:
- description:
- - strings
- SRV:
- description:
- - priority, weight, port, target
- SSHFP:
- description:
- - algorithm, fp_type, fingerprint
- TLSA:
- description:
- - usage, selector, mtype, cert
- TXT:
- description:
- - strings
+RETURN = r"""
+_list:
+ description:
+ - List of composed strings or of dictionaries, with fields depending
+ on query type.
+ type: list
+ elements: raw
+ contains:
+ ALL:
+ description:
+ - C(owner), C(ttl), C(type).
+ A:
+ description:
+ - C(address).
+ AAAA:
+ description:
+ - C(address).
+ CAA:
+ description:
+ - C(flags).
+ - C(tag).
+ - C(value).
+ version_added: 6.3.0
+ CNAME:
+ description:
+ - C(target).
+ DNAME:
+ description:
+ - C(target).
+ DNSKEY:
+ description:
+ - C(flags), C(algorithm), C(protocol), C(key).
+ DS:
+ description:
+ - C(algorithm), C(digest_type), C(key_tag), C(digest).
+ HINFO:
+ description:
+ - C(cpu), C(os).
+ LOC:
+ description:
+ - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision).
+ MX:
+ description:
+ - C(preference), C(exchange).
+ NAPTR:
+ description:
+ - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement).
+ NS:
+ description:
+ - C(target).
+ NSEC3PARAM:
+ description:
+ - C(algorithm), C(flags), C(iterations), C(salt).
+ PTR:
+ description:
+ - C(target).
+ RP:
+ description:
+ - C(mbox), C(txt).
+ SOA:
+ description:
+ - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum).
+ SPF:
+ description:
+ - C(strings).
+ SRV:
+ description:
+ - C(priority), C(weight), C(port), C(target).
+ SSHFP:
+ description:
+ - C(algorithm), C(fp_type), C(fingerprint).
+ TLSA:
+ description:
+ - C(usage), C(selector), C(mtype), C(cert).
+ TXT:
+ description:
+ - C(strings).
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.display import Display
import socket
@@ -345,7 +368,7 @@ class LookupModule(LookupBase):
try:
rdclass = dns.rdataclass.from_text(self.get_option('class'))
except Exception as e:
- raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+ raise AnsibleError(f"dns lookup illegal CLASS: {e}")
myres.retry_servfail = self.get_option('retry_servfail')
for t in terms:
@@ -363,7 +386,7 @@ class LookupModule(LookupBase):
nsaddr = dns.resolver.query(ns)[0].address
nameservers.append(nsaddr)
except Exception as e:
- raise AnsibleError("dns lookup NS: %s" % to_native(e))
+ raise AnsibleError(f"dns lookup NS: {e}")
continue
if '=' in t:
try:
@@ -379,7 +402,7 @@ class LookupModule(LookupBase):
try:
rdclass = dns.rdataclass.from_text(arg)
except Exception as e:
- raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+ raise AnsibleError(f"dns lookup illegal CLASS: {e}")
elif opt == 'retry_servfail':
myres.retry_servfail = boolean(arg)
elif opt == 'fail_on_error':
@@ -400,7 +423,7 @@ class LookupModule(LookupBase):
else:
domains.append(t)
- # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
+ # print "--- domain = {domain} qtype={qtype} rdclass={rdclass}"
if port:
myres.port = port
@@ -416,7 +439,7 @@ class LookupModule(LookupBase):
except dns.exception.SyntaxError:
pass
except Exception as e:
- raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
+ raise AnsibleError(f"dns.reversename unhandled exception {e}")
domains = reversed_domains
if len(domains) > 1:
@@ -445,20 +468,20 @@ class LookupModule(LookupBase):
ret.append(rd)
except Exception as err:
if fail_on_error:
- raise AnsibleError("Lookup failed: %s" % str(err))
+ raise AnsibleError(f"Lookup failed: {err}")
ret.append(str(err))
except dns.resolver.NXDOMAIN as err:
if fail_on_error:
- raise AnsibleError("Lookup failed: %s" % str(err))
+ raise AnsibleError(f"Lookup failed: {err}")
if not real_empty:
ret.append('NXDOMAIN')
except (dns.resolver.NoAnswer, dns.resolver.Timeout, dns.resolver.NoNameservers) as err:
if fail_on_error:
- raise AnsibleError("Lookup failed: %s" % str(err))
+ raise AnsibleError(f"Lookup failed: {err}")
if not real_empty:
ret.append("")
except dns.exception.DNSException as err:
- raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
+ raise AnsibleError(f"dns.resolver unhandled exception {err}")
return ret
diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py
index 1ce511b849..fb0a5d5138 100644
--- a/plugins/lookup/dnstxt.py
+++ b/plugins/lookup/dnstxt.py
@@ -6,30 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: dnstxt
- author: Jan-Piet Mens (@jpmens)
- short_description: query a domain(s)'s DNS txt fields
- requirements:
- - dns/dns.resolver (python library)
+DOCUMENTATION = r"""
+name: dnstxt
+author: Jan-Piet Mens (@jpmens)
+short_description: Query a domain(s)'s DNS txt fields
+requirements:
+ - dns/dns.resolver (python library)
+description:
+ - Uses a python library to return the DNS TXT record for a domain.
+options:
+ _terms:
+ description: Domain or list of domains to query TXT records from.
+ required: true
+ type: list
+ elements: string
+ real_empty:
description:
- - Uses a python library to return the DNS TXT record for a domain.
- options:
- _terms:
- description: domain or list of domains to query TXT records from
- required: true
- type: list
- elements: string
- real_empty:
- description:
- - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
- - The default for this option will likely change to V(true) in the future.
- default: false
- type: bool
- version_added: 6.0.0
-'''
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option is likely to change to V(true) in the future.
+ default: false
+ type: bool
+ version_added: 6.0.0
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: show txt entry
ansible.builtin.debug:
msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
@@ -48,11 +48,11 @@ EXAMPLES = """
with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
"""
-RETURN = """
- _list:
- description:
- - values returned by the DNS TXT record.
- type: list
+RETURN = r"""
+_list:
+ description:
+ - Values returned by the DNS TXT record.
+ type: list
"""
HAVE_DNS = False
@@ -64,7 +64,6 @@ except ImportError:
pass
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.lookup import LookupBase
# ==============================================================
@@ -108,7 +107,7 @@ class LookupModule(LookupBase):
continue
string = ''
except DNSException as e:
- raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+ raise AnsibleError(f"dns.resolver unhandled exception {e}")
ret.append(''.join(string))
diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py
index 5e26c43af4..0b34b3ce31 100644
--- a/plugins/lookup/dsv.py
+++ b/plugins/lookup/dsv.py
@@ -12,81 +12,78 @@ author: Adam Migus (@amigus)
short_description: Get secrets from Thycotic DevOps Secrets Vault
version_added: 1.0.0
description:
- - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
- DSV O(tenant) using a O(client_id) and O(client_secret).
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret).
requirements:
- - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
+ - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
options:
- _terms:
- description: The path to the secret, for example V(/staging/servers/web1).
- required: true
- tenant:
- description: The first format parameter in the default O(url_template).
- type: string
- env:
- - name: DSV_TENANT
- ini:
- - section: dsv_lookup
- key: tenant
- required: true
- tld:
- default: com
- description: The top-level domain of the tenant; the second format
- parameter in the default O(url_template).
- type: string
- env:
- - name: DSV_TLD
- ini:
- - section: dsv_lookup
- key: tld
- required: false
- client_id:
- description: The client_id with which to request the Access Grant.
- type: string
- env:
- - name: DSV_CLIENT_ID
- ini:
- - section: dsv_lookup
- key: client_id
- required: true
- client_secret:
- description: The client secret associated with the specific O(client_id).
- type: string
- env:
- - name: DSV_CLIENT_SECRET
- ini:
- - section: dsv_lookup
- key: client_secret
- required: true
- url_template:
- default: https://{}.secretsvaultcloud.{}/v1
- description: The path to prepend to the base URL to form a valid REST
- API request.
- type: string
- env:
- - name: DSV_URL_TEMPLATE
- ini:
- - section: dsv_lookup
- key: url_template
- required: false
+ _terms:
+ description: The path to the secret, for example V(/staging/servers/web1).
+ required: true
+ tenant:
+ description: The first format parameter in the default O(url_template).
+ type: string
+ env:
+ - name: DSV_TENANT
+ ini:
+ - section: dsv_lookup
+ key: tenant
+ required: true
+ tld:
+ default: com
+ description: The top-level domain of the tenant; the second format parameter in the default O(url_template).
+ type: string
+ env:
+ - name: DSV_TLD
+ ini:
+ - section: dsv_lookup
+ key: tld
+ required: false
+ client_id:
+ description: The client_id with which to request the Access Grant.
+ type: string
+ env:
+ - name: DSV_CLIENT_ID
+ ini:
+ - section: dsv_lookup
+ key: client_id
+ required: true
+ client_secret:
+ description: The client secret associated with the specific O(client_id).
+ type: string
+ env:
+ - name: DSV_CLIENT_SECRET
+ ini:
+ - section: dsv_lookup
+ key: client_secret
+ required: true
+ url_template:
+ default: https://{}.secretsvaultcloud.{}/v1
+ description: The path to prepend to the base URL to form a valid REST API request.
+ type: string
+ env:
+ - name: DSV_URL_TEMPLATE
+ ini:
+ - section: dsv_lookup
+ key: url_template
+ required: false
"""
RETURN = r"""
_list:
- description:
- - One or more JSON responses to C(GET /secrets/{path}).
- - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
- type: list
- elements: dict
+ description:
+ - One or more JSON responses to C(GET /secrets/{path}).
+ - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
- hosts: localhost
vars:
- secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
+ secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
tasks:
- - ansible.builtin.debug:
- msg: 'the password is {{ secret["data"]["password"] }}'
+ - ansible.builtin.debug:
+ msg: 'the password is {{ secret["data"]["password"] }}'
"""
from ansible.errors import AnsibleError, AnsibleOptionsError
@@ -135,17 +132,17 @@ class LookupModule(LookupBase):
result = []
for term in terms:
- display.debug("dsv_lookup term: %s" % term)
+ display.debug(f"dsv_lookup term: {term}")
try:
path = term.lstrip("[/:]")
if path == "":
- raise AnsibleOptionsError("Invalid secret path: %s" % term)
+ raise AnsibleOptionsError(f"Invalid secret path: {term}")
- display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path)
+ display.vvv(f"DevOps Secrets Vault GET /secrets/{path}")
result.append(vault.get_secret_json(path))
except SecretsVaultError as error:
raise AnsibleError(
- "DevOps Secrets Vault lookup failure: %s" % error.message
+ f"DevOps Secrets Vault lookup failure: {error.message}"
)
return result
diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py
index 1dec890b20..d8d992e79f 100644
--- a/plugins/lookup/etcd.py
+++ b/plugins/lookup/etcd.py
@@ -8,46 +8,46 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Jan-Piet Mens (@jpmens)
- name: etcd
- short_description: get info from an etcd server
+DOCUMENTATION = r"""
+author:
+ - Jan-Piet Mens (@jpmens)
+name: etcd
+short_description: Get info from an etcd server
+description:
+ - Retrieves data from an etcd server.
+options:
+ _terms:
description:
- - Retrieves data from an etcd server
- options:
- _terms:
- description:
- - the list of keys to lookup on the etcd server
- type: list
- elements: string
- required: true
- url:
- description:
- - Environment variable with the URL for the etcd server
- type: string
- default: 'http://127.0.0.1:4001'
- env:
- - name: ANSIBLE_ETCD_URL
- version:
- description:
- - Environment variable with the etcd protocol version
- type: string
- default: 'v1'
- env:
- - name: ANSIBLE_ETCD_VERSION
- validate_certs:
- description:
- - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
- default: true
- type: boolean
- seealso:
- - module: community.general.etcd3
- - plugin: community.general.etcd3
- plugin_type: lookup
-'''
+ - The list of keys to lookup on the etcd server.
+ type: list
+ elements: string
+ required: true
+ url:
+ description:
+ - Environment variable with the URL for the etcd server.
+ type: string
+ default: 'http://127.0.0.1:4001'
+ env:
+ - name: ANSIBLE_ETCD_URL
+ version:
+ description:
+ - Environment variable with the etcd protocol version.
+ type: string
+ default: 'v1'
+ env:
+ - name: ANSIBLE_ETCD_VERSION
+ validate_certs:
+ description:
+ - Toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
+ default: true
+ type: boolean
+seealso:
+ - module: community.general.etcd3
+ - plugin: community.general.etcd3
+ plugin_type: lookup
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: "a value from a locally running etcd"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
@@ -59,15 +59,15 @@ EXAMPLES = '''
- name: "you can set server options inline"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
-'''
+"""
-RETURN = '''
- _raw:
- description:
- - List of values associated with input keys.
- type: list
- elements: string
-'''
+RETURN = r"""
+_raw:
+ description:
+ - List of values associated with input keys.
+ type: list
+ elements: string
+"""
import json
@@ -104,7 +104,7 @@ class Etcd:
def __init__(self, url, version, validate_certs):
self.url = url
self.version = version
- self.baseurl = '%s/%s/keys' % (self.url, self.version)
+ self.baseurl = f'{self.url}/{self.version}/keys'
self.validate_certs = validate_certs
def _parse_node(self, node):
@@ -125,7 +125,7 @@ class Etcd:
return path
def get(self, key):
- url = "%s/%s?recursive=true" % (self.baseurl, key)
+ url = f"{self.baseurl}/{key}?recursive=true"
data = None
value = {}
try:
diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py
index 0bda006e34..2af1e9052b 100644
--- a/plugins/lookup/etcd3.py
+++ b/plugins/lookup/etcd3.py
@@ -7,101 +7,101 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Eric Belhomme (@eric-belhomme)
- version_added: '0.2.0'
- name: etcd3
- short_description: Get key values from etcd3 server
+DOCUMENTATION = r"""
+author:
+ - Eric Belhomme (@eric-belhomme)
+version_added: '0.2.0'
+name: etcd3
+short_description: Get key values from etcd3 server
+description:
+ - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
+ - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment
+ variables.
+ - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+options:
+ _terms:
description:
- - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
- - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables.
- - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+ - The list of keys (or key prefixes) to look up on the etcd3 server.
+ type: list
+ elements: str
+ required: true
+ prefix:
+ description:
+ - Look for key or prefix key.
+ type: bool
+ default: false
+ endpoints:
+ description:
+ - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with a URL form, for example
+ V(https://hostname:2379), or V(:) form.
+ - The V(host) part is overwritten by O(host) option, if defined.
+ - The V(port) part is overwritten by O(port) option, if defined.
+ env:
+ - name: ETCDCTL_ENDPOINTS
+ default: '127.0.0.1:2379'
+ type: str
+ host:
+ description:
+ - Etcd3 listening client host.
+ - Takes precedence over O(endpoints).
+ type: str
+ port:
+ description:
+ - Etcd3 listening client port.
+ - Takes precedence over O(endpoints).
+ type: int
+ ca_cert:
+ description:
+ - Etcd3 CA authority.
+ env:
+ - name: ETCDCTL_CACERT
+ type: str
+ cert_cert:
+ description:
+ - Etcd3 client certificate.
+ env:
+ - name: ETCDCTL_CERT
+ type: str
+ cert_key:
+ description:
+ - Etcd3 client private key.
+ env:
+ - name: ETCDCTL_KEY
+ type: str
+ timeout:
+ description:
+ - Client timeout.
+ default: 60
+ env:
+ - name: ETCDCTL_DIAL_TIMEOUT
+ type: int
+ user:
+ description:
+ - Authenticated user name.
+ env:
+ - name: ETCDCTL_USER
+ type: str
+ password:
+ description:
+ - Authenticated user password.
+ env:
+ - name: ETCDCTL_PASSWORD
+ type: str
- options:
- _terms:
- description:
- - The list of keys (or key prefixes) to look up on the etcd3 server.
- type: list
- elements: str
- required: true
- prefix:
- description:
- - Look for key or prefix key.
- type: bool
- default: false
- endpoints:
- description:
- - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable.
- Specify the etcd3 connection with and URL form, for example V(https://hostname:2379), or V(:) form.
- - The V(host) part is overwritten by O(host) option, if defined.
- - The V(port) part is overwritten by O(port) option, if defined.
- env:
- - name: ETCDCTL_ENDPOINTS
- default: '127.0.0.1:2379'
- type: str
- host:
- description:
- - etcd3 listening client host.
- - Takes precedence over O(endpoints).
- type: str
- port:
- description:
- - etcd3 listening client port.
- - Takes precedence over O(endpoints).
- type: int
- ca_cert:
- description:
- - etcd3 CA authority.
- env:
- - name: ETCDCTL_CACERT
- type: str
- cert_cert:
- description:
- - etcd3 client certificate.
- env:
- - name: ETCDCTL_CERT
- type: str
- cert_key:
- description:
- - etcd3 client private key.
- env:
- - name: ETCDCTL_KEY
- type: str
- timeout:
- description:
- - Client timeout.
- default: 60
- env:
- - name: ETCDCTL_DIAL_TIMEOUT
- type: int
- user:
- description:
- - Authenticated user name.
- env:
- - name: ETCDCTL_USER
- type: str
- password:
- description:
- - Authenticated user password.
- env:
- - name: ETCDCTL_PASSWORD
- type: str
+notes:
+ - O(host) and O(port) options take precedence over O(endpoints) option.
+ - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINTS) environment variable and keep O(endpoints),
+ O(host), and O(port) unused.
+seealso:
+ - module: community.general.etcd3
+ - plugin: community.general.etcd
+ plugin_type: lookup
- notes:
- - O(host) and O(port) options take precedence over (endpoints) option.
- - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINT)
- environment variable and keep O(endpoints), O(host), and O(port) unused.
- seealso:
- - module: community.general.etcd3
- - plugin: community.general.etcd
- plugin_type: lookup
+requirements:
+ - "etcd3 >= 0.10"
+"""
- requirements:
- - "etcd3 >= 0.10"
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: "a value from a locally running etcd"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
@@ -117,22 +117,22 @@ EXAMPLES = '''
- name: "connect to etcd3 with a client certificate"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
-'''
+"""
-RETURN = '''
- _raw:
- description:
- - List of keys and associated values.
- type: list
- elements: dict
- contains:
- key:
- description: The element's key.
- type: str
- value:
- description: The element's value.
- type: str
-'''
+RETURN = r"""
+_raw:
+ description:
+ - List of keys and associated values.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The element's key.
+ type: str
+ value:
+ description: The element's value.
+ type: str
+"""
import re
@@ -168,7 +168,7 @@ def etcd3_client(client_params):
etcd = etcd3.client(**client_params)
etcd.status()
except Exception as exp:
- raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
+ raise AnsibleLookupError(f'Cannot connect to etcd cluster: {exp}')
return etcd
@@ -204,7 +204,7 @@ class LookupModule(LookupBase):
cnx_log = dict(client_params)
if 'password' in cnx_log:
cnx_log['password'] = ''
- display.verbose("etcd3 connection parameters: %s" % cnx_log)
+ display.verbose(f"etcd3 connection parameters: {cnx_log}")
# connect to etcd3 server
etcd = etcd3_client(client_params)
@@ -218,12 +218,12 @@ class LookupModule(LookupBase):
if val and meta:
ret.append({'key': to_native(meta.key), 'value': to_native(val)})
except Exception as exp:
- display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp)))
+ display.warning(f'Caught except during etcd3.get_prefix: {exp}')
else:
try:
val, meta = etcd.get(term)
if val and meta:
ret.append({'key': to_native(meta.key), 'value': to_native(val)})
except Exception as exp:
- display.warning('Caught except during etcd3.get: %s' % (to_native(exp)))
+ display.warning(f'Caught except during etcd3.get: {exp}')
return ret
diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py
index ee7bfe27b7..24e0c20eea 100644
--- a/plugins/lookup/filetree.py
+++ b/plugins/lookup/filetree.py
@@ -6,22 +6,23 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
name: filetree
author: Dag Wieers (@dagwieers)
-short_description: recursively match all files in a directory tree
+short_description: Recursively match all files in a directory tree
description:
-- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
-- Supports directories, files and symlinks, including SELinux and other file properties.
-- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths.
- This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role.
+ - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
+ - Supports directories, files and symlinks, including SELinux and other file properties.
+ - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed
+ in previous paths. This enables merging different trees in order of importance, or adding role_vars to specific paths to
+ influence different instances of the same role.
options:
_terms:
description: Path(s) of files to read.
required: true
type: list
elements: string
-'''
+"""
EXAMPLES = r"""
- name: Create directories
@@ -59,61 +60,61 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: List of dictionaries with file information.
- type: list
- elements: dict
- contains:
- src:
- description:
- - Full path to file.
- - Not returned when RV(_raw[].state) is set to V(directory).
- type: path
- root:
- description: Allows filtering by original location.
- type: path
- path:
- description: Contains the relative path to root.
- type: path
- mode:
- description: The permissions the resulting file or directory.
- type: str
- state:
- description: TODO
- type: str
- owner:
- description: Name of the user that owns the file/directory.
- type: raw
- group:
- description: Name of the group that owns the file/directory.
- type: raw
- seuser:
- description: The user part of the SELinux file context.
- type: raw
- serole:
- description: The role part of the SELinux file context.
- type: raw
- setype:
- description: The type part of the SELinux file context.
- type: raw
- selevel:
- description: The level part of the SELinux file context.
- type: raw
- uid:
- description: Owner ID of the file/directory.
- type: int
- gid:
- description: Group ID of the file/directory.
- type: int
- size:
- description: Size of the target.
- type: int
- mtime:
- description: Time of last modification.
- type: float
- ctime:
- description: Time of last metadata update or creation (depends on OS).
- type: float
+_raw:
+ description: List of dictionaries with file information.
+ type: list
+ elements: dict
+ contains:
+ src:
+ description:
+ - Full path to file.
+ - Not returned when RV(_raw[].state) is set to V(directory).
+ type: path
+ root:
+ description: Allows filtering by original location.
+ type: path
+ path:
+ description: Contains the relative path to root.
+ type: path
+ mode:
+ description: The permissions the resulting file or directory.
+ type: str
+ state:
+ description: TODO.
+ type: str
+ owner:
+ description: Name of the user that owns the file/directory.
+ type: raw
+ group:
+ description: Name of the group that owns the file/directory.
+ type: raw
+ seuser:
+ description: The user part of the SELinux file context.
+ type: raw
+ serole:
+ description: The role part of the SELinux file context.
+ type: raw
+ setype:
+ description: The type part of the SELinux file context.
+ type: raw
+ selevel:
+ description: The level part of the SELinux file context.
+ type: raw
+ uid:
+ description: Owner ID of the file/directory.
+ type: int
+ gid:
+ description: Group ID of the file/directory.
+ type: int
+ size:
+ description: Size of the target.
+ type: int
+ mtime:
+ description: Time of last modification.
+ type: float
+ ctime:
+ description: Time of last metadata update or creation (depends on OS).
+ type: float
"""
import os
import pwd
@@ -158,7 +159,7 @@ def file_props(root, path):
try:
st = os.lstat(abspath)
except OSError as e:
- display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e))
+ display.warning(f'filetree: Error using stat() on path {abspath} ({e})')
return None
ret = dict(root=root, path=path)
@@ -172,7 +173,7 @@ def file_props(root, path):
ret['state'] = 'file'
ret['src'] = abspath
else:
- display.warning('filetree: Error file type of %s is not supported' % abspath)
+ display.warning(f'filetree: Error file type of {abspath} is not supported')
return None
ret['uid'] = st.st_uid
@@ -185,7 +186,7 @@ def file_props(root, path):
ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name)
except KeyError:
ret['group'] = st.st_gid
- ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode))
+ ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}'
ret['size'] = st.st_size
ret['mtime'] = st.st_mtime
ret['ctime'] = st.st_ctime
@@ -212,7 +213,7 @@ class LookupModule(LookupBase):
term_file = os.path.basename(term)
dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
path = os.path.join(dwimmed_path, term_file)
- display.debug("Walking '{0}'".format(path))
+ display.debug(f"Walking '{path}'")
for root, dirs, files in os.walk(path, topdown=True):
for entry in dirs + files:
relpath = os.path.relpath(os.path.join(root, entry), path)
@@ -221,7 +222,7 @@ class LookupModule(LookupBase):
if relpath not in [entry['path'] for entry in ret]:
props = file_props(path, relpath)
if props is not None:
- display.debug(" found '{0}'".format(os.path.join(path, relpath)))
+ display.debug(f" found '{os.path.join(path, relpath)}'")
ret.append(props)
return ret
diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py
index 0071417a0d..de4a21fbdd 100644
--- a/plugins/lookup/flattened.py
+++ b/plugins/lookup/flattened.py
@@ -6,35 +6,35 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: flattened
- author: Serge van Ginderachter (!UNKNOWN)
- short_description: return single list completely flattened
- description:
- - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
- options:
- _terms:
- description: lists to flatten
- type: list
- elements: raw
- required: true
- notes:
- - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level,
- this plugin will continue to flatten until it cannot find lists anymore.
- - Aka highlander plugin, there can only be one (list).
-'''
+DOCUMENTATION = r"""
+name: flattened
+author: Serge van Ginderachter (!UNKNOWN)
+short_description: Return single list completely flattened
+description:
+ - Given one or more lists, this lookup flattens any list elements found recursively until only 1 list is left.
+options:
+ _terms:
+ description: Lists to flatten.
+ type: list
+ elements: raw
+ required: true
+notes:
+ - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level, this plugin continues to flatten until
+ it cannot find lists anymore.
+ - Aka highlander plugin, there can only be one (list).
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: "'unnest' all elements into single list"
ansible.builtin.debug:
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
"""
-RETURN = """
- _raw:
- description:
- - flattened list
- type: list
+RETURN = r"""
+_raw:
+ description:
+ - Flattened list.
+ type: list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
@@ -67,18 +67,13 @@ class LookupModule(LookupBase):
if isinstance(term, string_types):
# convert a variable to a list
- try:
- term2 = listify_lookup_plugin_terms(term, templar=self._templar)
- except TypeError:
- # The loader argument is deprecated in ansible-core 2.14+. Fall back to
- # pre-2.14 behavior for older ansible-core versions.
- term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar)
# but avoid converting a plain string to a list of one string
if term2 != [term]:
term = term2
if isinstance(term, list):
- # if it's a list, check recursively for items that are a list
+ # if it is a list, check recursively for items that are a list
term = self._do_flatten(term, variables)
ret.extend(term)
else:
diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py
index 3ce695942d..dbc8cde3b5 100644
--- a/plugins/lookup/github_app_access_token.py
+++ b/plugins/lookup/github_app_access_token.py
@@ -5,43 +5,49 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: github_app_access_token
- author:
- - Poh Wei Sheng (@weisheng-p)
- short_description: Obtain short-lived Github App Access tokens
- version_added: '8.2.0'
- requirements:
- - jwt (https://github.com/GehirnInc/python-jwt)
+DOCUMENTATION = r"""
+name: github_app_access_token
+author:
+ - Poh Wei Sheng (@weisheng-p)
+short_description: Obtain short-lived Github App Access tokens
+version_added: '8.2.0'
+requirements:
+ - jwt (https://github.com/GehirnInc/python-jwt)
+description:
+ - This generates a Github access token that can be used with a C(git) command, if you use a Github App.
+options:
+ key_path:
description:
- - This generates a Github access token that can be used with a C(git) command, if you use a Github App.
- options:
- key_path:
- description:
- - Path to your private key.
- required: true
- type: path
- app_id:
- description:
- - Your GitHub App ID, you can find this in the Settings page.
- required: true
- type: str
- installation_id:
- description:
- - The installation ID that contains the git repository you would like access to.
- - As of 2023-12-24, this can be found via Settings page > Integrations > Application. The last part of the URL in the
- configure button is the installation ID.
- - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID.
- required: true
- type: str
- token_expiry:
- description:
- - How long the token should last for in seconds.
- default: 600
- type: int
-'''
+ - Path to your private key.
+ - Either O(key_path) or O(private_key) must be specified.
+ type: path
+ app_id:
+ description:
+ - Your GitHub App ID, you can find this in the Settings page.
+ required: true
+ type: str
+ installation_id:
+ description:
+ - The installation ID that contains the git repository you would like access to.
+ - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the
+ configure button is the installation ID.
+ - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID.
+ required: true
+ type: str
+ private_key:
+ description:
+ - GitHub App private key in PEM file format as string.
+ - Either O(key_path) or O(private_key) must be specified.
+ type: str
+ version_added: 10.0.0
+ token_expiry:
+ description:
+ - How long the token should last for in seconds.
+ default: 600
+ type: int
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209
ansible.builtin.git:
repo: >-
@@ -51,14 +57,14 @@ EXAMPLES = '''
github_token: >-
{{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key',
app_id='123456', installation_id='64209') }}
-'''
+"""
-RETURN = '''
- _raw:
- description: A one-element list containing your GitHub access token.
- type: list
- elements: str
-'''
+RETURN = r"""
+_raw:
+ description: A one-element list containing your GitHub access token.
+ type: list
+ elements: str
+"""
try:
@@ -71,7 +77,7 @@ import time
import json
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
@@ -84,12 +90,14 @@ else:
display = Display()
-def read_key(path):
+def read_key(path, private_key=None):
try:
+ if private_key:
+ return jwk_from_pem(private_key.encode('utf-8'))
with open(path, 'rb') as pem_file:
return jwk_from_pem(pem_file.read())
except Exception as e:
- raise AnsibleError("Error while parsing key file: {0}".format(e))
+ raise AnsibleError(f"Error while parsing key file: {e}")
def encode_jwt(app_id, jwk, exp=600):
@@ -102,7 +110,7 @@ def encode_jwt(app_id, jwk, exp=600):
try:
return jwt_instance.encode(payload, jwk, alg='RS256')
except Exception as e:
- raise AnsibleError("Error while encoding jwt: {0}".format(e))
+ raise AnsibleError(f"Error while encoding jwt: {e}")
def post_request(generated_jwt, installation_id):
@@ -116,24 +124,24 @@ def post_request(generated_jwt, installation_id):
except HTTPError as e:
try:
error_body = json.loads(e.read().decode())
- display.vvv("Error returned: {0}".format(error_body))
+ display.vvv(f"Error returned: {error_body}")
except Exception:
error_body = {}
if e.code == 404:
raise AnsibleError("Github return error. Please confirm your installationd_id value is valid")
elif e.code == 401:
raise AnsibleError("Github return error. Please confirm your private key is valid")
- raise AnsibleError("Unexpected data returned: {0} -- {1}".format(e, error_body))
+ raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}")
response_body = response.read()
try:
json_data = json.loads(response_body.decode('utf-8'))
except json.decoder.JSONDecodeError as e:
- raise AnsibleError("Error while dencoding JSON respone from github: {0}".format(e))
+ raise AnsibleError(f"Error while dencoding JSON respone from github: {e}")
return json_data.get('token')
-def get_token(key_path, app_id, installation_id, expiry=600):
- jwk = read_key(key_path)
+def get_token(key_path, app_id, installation_id, private_key, expiry=600):
+ jwk = read_key(key_path, private_key)
generated_jwt = encode_jwt(app_id, jwk, exp=expiry)
return post_request(generated_jwt, installation_id)
@@ -146,10 +154,16 @@ class LookupModule(LookupBase):
self.set_options(var_options=variables, direct=kwargs)
+ if not (self.get_option("key_path") or self.get_option("private_key")):
+ raise AnsibleOptionsError("One of key_path or private_key is required")
+ if self.get_option("key_path") and self.get_option("private_key"):
+ raise AnsibleOptionsError("key_path and private_key are mutually exclusive")
+
t = get_token(
self.get_option('key_path'),
self.get_option('app_id'),
self.get_option('installation_id'),
+ self.get_option('private_key'),
self.get_option('token_expiry'),
)
diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py
index 02669c98dc..27f133d78a 100644
--- a/plugins/lookup/hiera.py
+++ b/plugins/lookup/hiera.py
@@ -6,40 +6,40 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Juan Manuel Parrilla (@jparrill)
- name: hiera
- short_description: get info from hiera data
- requirements:
- - hiera (command line utility)
+DOCUMENTATION = r"""
+author:
+ - Juan Manuel Parrilla (@jparrill)
+name: hiera
+short_description: Get info from hiera data
+requirements:
+ - hiera (command line utility)
+description:
+ - Retrieves data from a Puppetmaster node using Hiera as ENC.
+options:
+ _terms:
description:
- - Retrieves data from an Puppetmaster node using Hiera as ENC.
- options:
- _terms:
- description:
- - The list of keys to lookup on the Puppetmaster.
- type: list
- elements: string
- required: true
- executable:
- description:
- - Binary file to execute Hiera.
- type: string
- default: '/usr/bin/hiera'
- env:
- - name: ANSIBLE_HIERA_BIN
- config_file:
- description:
- - File that describes the hierarchy of Hiera.
- type: string
- default: '/etc/hiera.yaml'
- env:
- - name: ANSIBLE_HIERA_CFG
+ - The list of keys to lookup on the Puppetmaster.
+ type: list
+ elements: string
+ required: true
+ executable:
+ description:
+ - Binary file to execute Hiera.
+ type: string
+ default: '/usr/bin/hiera'
+ env:
+ - name: ANSIBLE_HIERA_BIN
+ config_file:
+ description:
+ - File that describes the hierarchy of Hiera.
+ type: string
+ default: '/etc/hiera.yaml'
+ env:
+ - name: ANSIBLE_HIERA_CFG
# FIXME: incomplete options .. _terms? environment/fqdn?
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# All this examples depends on hiera.yml that describes the hierarchy
- name: "a value from Hiera 'DB'"
@@ -55,12 +55,12 @@ EXAMPLES = """
msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
"""
-RETURN = """
- _raw:
- description:
- - a value associated with input key
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description:
+ - A value associated with input key.
+ type: list
+ elements: str
"""
from ansible.plugins.lookup import LookupBase
@@ -79,8 +79,7 @@ class Hiera(object):
pargs.extend(hiera_key)
- rc, output, err = run_cmd("{0} -c {1} {2}".format(
- self.hiera_bin, self.hiera_cfg, hiera_key[0]))
+ rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}")
return to_text(output.strip())
diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py
index a4c914ed1a..75d808e736 100644
--- a/plugins/lookup/keyring.py
+++ b/plugins/lookup/keyring.py
@@ -7,18 +7,18 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: keyring
- author:
- - Samuel Boucher (!UNKNOWN)
- requirements:
- - keyring (python library)
- short_description: grab secrets from the OS keyring
- description:
- - Allows you to access data stored in the OS provided keyring/keychain.
-'''
+DOCUMENTATION = r"""
+name: keyring
+author:
+ - Samuel Boucher (!UNKNOWN)
+requirements:
+ - keyring (python library)
+short_description: Grab secrets from the OS keyring
+description:
+ - Allows you to access data stored in the OS provided keyring/keychain.
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: output secrets to screen (BAD IDEA)
ansible.builtin.debug:
msg: "Password: {{item}}"
@@ -31,11 +31,11 @@ EXAMPLES = """
login_user: joe
"""
-RETURN = """
- _raw:
- description: Secrets stored.
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Secrets stored.
+ type: list
+ elements: str
"""
HAS_KEYRING = True
@@ -57,17 +57,17 @@ class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if not HAS_KEYRING:
- raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
+ raise AnsibleError("Can't LOOKUP(keyring): missing required python library 'keyring'")
self.set_options(var_options=variables, direct=kwargs)
- display.vvvv(u"keyring: %s" % keyring.get_keyring())
+ display.vvvv(f"keyring: {keyring.get_keyring()}")
ret = []
for term in terms:
(servicename, username) = (term.split()[0], term.split()[1])
- display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
+ display.vvvv(f"username: {username}, servicename: {servicename} ")
password = keyring.get_password(servicename, username)
if password is None:
- raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
+ raise AnsibleError(f"servicename: {servicename} for user {username} not found")
ret.append(password.rstrip())
return ret
diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py
index 8eb3090b76..2633848937 100644
--- a/plugins/lookup/lastpass.py
+++ b/plugins/lookup/lastpass.py
@@ -6,39 +6,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: lastpass
- author:
- - Andrew Zenk (!UNKNOWN)
- requirements:
- - lpass (command line utility)
- - must have already logged into LastPass
- short_description: fetch data from LastPass
- description:
- - Use the lpass command line utility to fetch specific fields from LastPass.
- options:
- _terms:
- description: Key from which you want to retrieve the field.
- required: true
- type: list
- elements: str
- field:
- description: Field to return from LastPass.
- default: 'password'
- type: str
-'''
+DOCUMENTATION = r"""
+name: lastpass
+author:
+ - Andrew Zenk (!UNKNOWN)
+requirements:
+ - lpass (command line utility)
+ - must have already logged into LastPass
+short_description: Fetch data from LastPass
+description:
+ - Use the lpass command line utility to fetch specific fields from LastPass.
+options:
+ _terms:
+ description: Key from which you want to retrieve the field.
+ required: true
+ type: list
+ elements: str
+ field:
+ description: Field to return from LastPass.
+ default: 'password'
+ type: str
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: get 'custom_field' from LastPass entry 'entry-name'
ansible.builtin.debug:
msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
"""
-RETURN = """
- _raw:
- description: secrets stored
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Secrets stored.
+ type: list
+ elements: str
"""
from subprocess import Popen, PIPE
@@ -83,9 +83,9 @@ class LPass(object):
def get_field(self, key, field):
if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
- out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
+ out, err = self._run(self._build_args("show", [f"--{field}", key]))
else:
- out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
+ out, err = self._run(self._build_args("show", [f"--field={field}", key]))
return out.strip()
diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py
index a37cff9569..b3728abb17 100644
--- a/plugins/lookup/lmdb_kv.py
+++ b/plugins/lookup/lmdb_kv.py
@@ -6,30 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: lmdb_kv
- author:
- - Jan-Piet Mens (@jpmens)
- version_added: '0.2.0'
- short_description: fetch data from LMDB
- description:
- - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
- requirements:
- - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/))
- options:
- _terms:
- description: List of keys to query.
- type: list
- elements: str
- db:
- description: Path to LMDB database.
- type: str
- default: 'ansible.mdb'
- vars:
- - name: lmdb_kv_db
-'''
+DOCUMENTATION = r"""
+name: lmdb_kv
+author:
+ - Jan-Piet Mens (@jpmens)
+version_added: '0.2.0'
+short_description: Fetch data from LMDB
+description:
+ - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
+requirements:
+ - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/))
+options:
+ _terms:
+ description: List of keys to query.
+ type: list
+ elements: str
+ db:
+ description: Path to LMDB database.
+ type: str
+ default: 'ansible.mdb'
+ vars:
+ - name: lmdb_kv_db
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: query LMDB for a list of country codes
ansible.builtin.debug:
msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
@@ -40,7 +40,7 @@ EXAMPLES = """
vars:
- lmdb_kv_db: jp.mdb
with_community.general.lmdb_kv:
- - "n*"
+ - "n*"
- name: get an item by key
ansible.builtin.assert:
@@ -52,9 +52,9 @@ EXAMPLES = """
- be
"""
-RETURN = """
+RETURN = r"""
_raw:
- description: value(s) stored in LMDB
+ description: Value(s) stored in LMDB.
type: list
elements: raw
"""
@@ -96,7 +96,7 @@ class LookupModule(LookupBase):
try:
env = lmdb.open(str(db), readonly=True)
except Exception as e:
- raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
+ raise AnsibleError(f"LMDB cannot open database {db}: {e}")
ret = []
if len(terms) == 0:
diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py
deleted file mode 100644
index 049d453e4f..0000000000
--- a/plugins/lookup/manifold.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Arigato Machine Inc.
-# Copyright (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author:
- - Kyrylo Galanov (!UNKNOWN)
- name: manifold
- short_description: get credentials from Manifold.co
- description:
- - Retrieves resources' credentials from Manifold.co
- options:
- _terms:
- description:
- - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
- matched resources will be returned.
- type: list
- elements: string
- required: false
- api_token:
- description:
- - manifold API token
- type: string
- required: true
- env:
- - name: MANIFOLD_API_TOKEN
- project:
- description:
- - The project label you want to get the resource for.
- type: string
- required: false
- team:
- description:
- - The team label you want to get the resource for.
- type: string
- required: false
-'''
-
-EXAMPLES = '''
- - name: all available resources
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
- - name: all available resources for a specific project in specific team
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
- - name: two specific resources
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
-'''
-
-RETURN = '''
- _raw:
- description:
- - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
- the same environment variable(s), the last one returned by the Manifold API will take precedence.
- type: dict
-'''
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils import six
-from ansible.utils.display import Display
-from traceback import format_exception
-import json
-import sys
-
-display = Display()
-
-
-class ApiError(Exception):
- pass
-
-
-class ManifoldApiClient(object):
- base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
- http_agent = 'python-manifold-ansible-1.0.0'
-
- def __init__(self, token):
- self._token = token
-
- def request(self, api, endpoint, *args, **kwargs):
- """
- Send a request to API backend and pre-process a response.
- :param api: API to send a request to
- :type api: str
- :param endpoint: API endpoint to fetch data from
- :type endpoint: str
- :param args: other args for open_url
- :param kwargs: other kwargs for open_url
- :return: server response. JSON response is automatically deserialized.
- :rtype: dict | list | str
- """
-
- default_headers = {
- 'Authorization': "Bearer {0}".format(self._token),
- 'Accept': "*/*" # Otherwise server doesn't set content-type header
- }
-
- url = self.base_url.format(api=api, endpoint=endpoint)
-
- headers = default_headers
- arg_headers = kwargs.pop('headers', None)
- if arg_headers:
- headers.update(arg_headers)
-
- try:
- display.vvvv('manifold lookup connecting to {0}'.format(url))
- response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
- data = response.read()
- if response.headers.get('content-type') == 'application/json':
- data = json.loads(data)
- return data
- except ValueError:
- raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
- except HTTPError as e:
- raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
- err=str(e), url=url, response=e.read()))
- except URLError as e:
- raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
- except SSLValidationError as e:
- raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
- except ConnectionError as e:
- raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))
-
- def get_resources(self, team_id=None, project_id=None, label=None):
- """
- Get resources list
- :param team_id: ID of the Team to filter resources by
- :type team_id: str
- :param project_id: ID of the project to filter resources by
- :type project_id: str
- :param label: filter resources by a label, returns a list with one or zero elements
- :type label: str
- :return: list of resources
- :rtype: list
- """
- api = 'marketplace'
- endpoint = 'resources'
- query_params = {}
-
- if team_id:
- query_params['team_id'] = team_id
- if project_id:
- query_params['project_id'] = project_id
- if label:
- query_params['label'] = label
-
- if query_params:
- endpoint += '?' + urlencode(query_params)
-
- return self.request(api, endpoint)
-
- def get_teams(self, label=None):
- """
- Get teams list
- :param label: filter teams by a label, returns a list with one or zero elements
- :type label: str
- :return: list of teams
- :rtype: list
- """
- api = 'identity'
- endpoint = 'teams'
- data = self.request(api, endpoint)
- # Label filtering is not supported by API, however this function provides uniform interface
- if label:
- data = list(filter(lambda x: x['body']['label'] == label, data))
- return data
-
- def get_projects(self, label=None):
- """
- Get projects list
- :param label: filter projects by a label, returns a list with one or zero elements
- :type label: str
- :return: list of projects
- :rtype: list
- """
- api = 'marketplace'
- endpoint = 'projects'
- query_params = {}
-
- if label:
- query_params['label'] = label
-
- if query_params:
- endpoint += '?' + urlencode(query_params)
-
- return self.request(api, endpoint)
-
- def get_credentials(self, resource_id):
- """
- Get resource credentials
- :param resource_id: ID of the resource to filter credentials by
- :type resource_id: str
- :return:
- """
- api = 'marketplace'
- endpoint = 'credentials?' + urlencode({'resource_id': resource_id})
- return self.request(api, endpoint)
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
- """
- :param terms: a list of resources lookups to run.
- :param variables: ansible variables active at the time of the lookup
- :param api_token: API token
- :param project: optional project label
- :param team: optional team label
- :return: a dictionary of resources credentials
- """
-
- self.set_options(var_options=variables, direct=kwargs)
-
- api_token = self.get_option('api_token')
- project = self.get_option('project')
- team = self.get_option('team')
-
- try:
- labels = terms
- client = ManifoldApiClient(api_token)
-
- if team:
- team_data = client.get_teams(team)
- if len(team_data) == 0:
- raise AnsibleError("Team '{0}' does not exist".format(team))
- team_id = team_data[0]['id']
- else:
- team_id = None
-
- if project:
- project_data = client.get_projects(project)
- if len(project_data) == 0:
- raise AnsibleError("Project '{0}' does not exist".format(project))
- project_id = project_data[0]['id']
- else:
- project_id = None
-
- if len(labels) == 1: # Use server-side filtering if one resource is requested
- resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
- else: # Get all resources and optionally filter labels
- resources_data = client.get_resources(team_id=team_id, project_id=project_id)
- if labels:
- resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
-
- if labels and len(resources_data) < len(labels):
- fetched_labels = [r['body']['label'] for r in resources_data]
- not_found_labels = [label for label in labels if label not in fetched_labels]
- raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))
-
- credentials = {}
- cred_map = {}
- for resource in resources_data:
- resource_credentials = client.get_credentials(resource['id'])
- if len(resource_credentials) and resource_credentials[0]['body']['values']:
- for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
- label = resource['body']['label']
- if cred_key in credentials:
- display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
- "with label '{new_label}'".format(cred_key=cred_key,
- old_label=cred_map[cred_key],
- new_label=label))
- credentials[cred_key] = cred_val
- cred_map[cred_key] = label
-
- ret = [credentials]
- return ret
- except ApiError as e:
- raise AnsibleError('API Error: {0}'.format(str(e)))
- except AnsibleError as e:
- raise e
- except Exception:
- exc_type, exc_value, exc_traceback = sys.exc_info()
- raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py
index 6287914747..aff70f9799 100644
--- a/plugins/lookup/merge_variables.py
+++ b/plugins/lookup/merge_variables.py
@@ -6,72 +6,71 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- author:
- - Roy Lenferink (@rlenferink)
- - Mark Ettema (@m-a-r-k-e)
- - Alexander Petrenz (@alpex8)
- name: merge_variables
- short_description: merge variables whose names match a given pattern
+DOCUMENTATION = r"""
+author:
+ - Roy Lenferink (@rlenferink)
+ - Mark Ettema (@m-a-r-k-e)
+ - Alexander Petrenz (@alpex8)
+name: merge_variables
+short_description: Merge variables whose names match a given pattern
+description:
+ - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions,
+ optionally.
+version_added: 6.5.0
+options:
+ _terms:
description:
- - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
- regular expressions, optionally.
- version_added: 6.5.0
- options:
- _terms:
- description:
- - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions
- that will be used to match all variables that should be merged.
- required: true
- type: list
- elements: str
- pattern_type:
- description:
- - Change the way of searching for the specified pattern.
- type: str
- default: 'regex'
- choices:
- - prefix
- - suffix
- - regex
- env:
- - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
- ini:
- - section: merge_variables_lookup
- key: pattern_type
- initial_value:
- description:
- - An initial value to start with.
- type: raw
- override:
- description:
- - Return an error, print a warning or ignore it when a key will be overwritten.
- - The default behavior V(error) makes the plugin fail when a key would be overwritten.
- - When V(warn) and V(ignore) are used, note that it is important to know that the variables
- are sorted by name before being merged. Keys for later variables in this order will overwrite
- keys of the same name for variables earlier in this order. To avoid potential confusion,
- better use O(override=error) whenever possible.
- type: str
- default: 'error'
- choices:
- - error
- - warn
- - ignore
- env:
- - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
- ini:
- - section: merge_variables_lookup
- key: override
- groups:
- description:
- - Search for variables accross hosts that belong to the given groups. This allows to collect configuration pieces
- accross different hosts (for example a service on a host with its database on another host).
- type: list
- elements: str
- version_added: 8.5.0
+ - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used
+ to match all variables that should be merged.
+ required: true
+ type: list
+ elements: str
+ pattern_type:
+ description:
+ - Change the way of searching for the specified pattern.
+ type: str
+ default: 'regex'
+ choices:
+ - prefix
+ - suffix
+ - regex
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
+ ini:
+ - section: merge_variables_lookup
+ key: pattern_type
+ initial_value:
+ description:
+ - An initial value to start with.
+ type: raw
+ override:
+ description:
+ - Return an error, print a warning or ignore it when a key is overwritten.
+ - The default behavior V(error) makes the plugin fail when a key would be overwritten.
+ - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before
+ being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this
+ order. To avoid potential confusion, better use O(override=error) whenever possible.
+ type: str
+ default: 'error'
+ choices:
+ - error
+ - warn
+ - ignore
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
+ ini:
+ - section: merge_variables_lookup
+ key: override
+ groups:
+ description:
+ - Search for variables across hosts that belong to the given groups. This allows to collect configuration pieces across
+ different hosts (for example a service on a host with its database on another host).
+ type: list
+ elements: str
+ version_added: 8.5.0
"""
-EXAMPLES = """
+EXAMPLES = r"""
# Some example variables, they can be defined anywhere as long as they are in scope
test_init_list:
- "list init item 1"
@@ -91,7 +90,6 @@ testb__test_dict:
ports:
- 3
-
# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"
@@ -100,7 +98,6 @@ example_a: "{{ lookup('community.general.merge_variables', '__test_dict', patter
# - 1
# - 3
-
# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
# result in a variable 'example_b'
example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"
@@ -112,12 +109,11 @@ example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', in
# - "test b item 1"
"""
-RETURN = """
- _raw:
- description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
- dict will be returned.
- type: raw
- elements: raw
+RETURN = r"""
+_raw:
+ description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned.
+ type: raw
+ elements: raw
"""
import re
@@ -149,7 +145,7 @@ class LookupModule(LookupBase):
ret = []
for term in terms:
if not isinstance(term, str):
- raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))
+ raise AnsibleError(f"Non-string type '{type(term)}' passed, only 'str' types are allowed!")
if not self._groups: # consider only own variables
ret.append(self._merge_vars(term, initial_value, variables))
@@ -186,9 +182,9 @@ class LookupModule(LookupBase):
return False
def _merge_vars(self, search_pattern, initial_value, variables):
- display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
+ display.vvv(f"Merge variables with {self._pattern_type}: {search_pattern}")
var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
- display.vvv("The following variables will be merged: {0}".format(var_merge_names))
+ display.vvv(f"The following variables will be merged: {var_merge_names}")
prev_var_type = None
result = None
@@ -197,8 +193,8 @@ class LookupModule(LookupBase):
result = initial_value
for var_name in var_merge_names:
- with self._templar.set_temporary_context(available_variables=variables): # tmp. switch renderer to context of current variables
- var_value = self._templar.template(variables[var_name]) # Render jinja2 templates
+ temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. switch renderer to context of current variables
+ var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates
var_type = _verify_and_get_type(var_value)
if prev_var_type is None:
@@ -226,8 +222,7 @@ class LookupModule(LookupBase):
dest[key] += value
else:
if (key in dest) and dest[key] != value:
- msg = "The key '{0}' with value '{1}' will be overwritten with value '{2}' from '{3}.{0}'".format(
- key, dest[key], value, ".".join(path))
+ msg = f"The key '{key}' with value '{dest[key]}' will be overwritten with value '{value}' from '{'.'.join(path)}.{key}'"
if self._override == "error":
raise AnsibleError(msg)
diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py
index 921cf9acb8..3dc589eaaf 100644
--- a/plugins/lookup/onepassword.py
+++ b/plugins/lookup/onepassword.py
@@ -8,39 +8,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- short_description: Fetch field values from 1Password
- description:
- - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password.
- requirements:
- - C(op) 1Password command line utility
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
- account_id:
- version_added: 7.5.0
- domain:
- version_added: 3.2.0
- field:
- description: Field to return from each matching item (case-insensitive).
- default: 'password'
- type: str
- service_account_token:
- version_added: 7.1.0
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
+DOCUMENTATION = r"""
+name: onepassword
+author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+short_description: Fetch field values from 1Password
+description:
+ - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password.
+requirements:
+ - C(op) 1Password command line utility
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
+ account_id:
+ version_added: 7.5.0
+ domain:
+ version_added: 3.2.0
+ field:
+ description: Field to return from each matching item (case-insensitive).
+ default: 'password'
+ type: str
+ service_account_token:
+ version_added: 7.1.0
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# These examples only work when already signed in to 1Password
- name: Retrieve password for KITT when already signed in to 1Password
ansible.builtin.debug:
@@ -56,32 +56,24 @@ EXAMPLES = """
- name: Retrieve password for HAL when not signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password)
+ var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password)
- name: Retrieve password for HAL when never signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password,
- username='tweety@acme.com',
- secret_key=vault_secret_key)
+ var: >-
+ lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password,
+ username='tweety@acme.com', secret_key=vault_secret_key)
- name: Retrieve password from specific account
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- account_id='abc123')
+ var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123')
"""
-RETURN = """
- _raw:
- description: Field data requested.
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Field data requested.
+ type: list
+ elements: str
"""
import abc
@@ -140,11 +132,11 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
if missing:
prefix = "Unable to sign in to 1Password. Missing required parameter"
plural = ""
- suffix = ": {params}.".format(params=", ".join(missing))
+ suffix = f": {', '.join(missing)}."
if len(missing) > 1:
plural = "s"
- msg = "{prefix}{plural}{suffix}".format(prefix=prefix, plural=plural, suffix=suffix)
+ msg = f"{prefix}{plural}{suffix}"
raise AnsibleLookupError(msg)
@abc.abstractmethod
@@ -169,7 +161,7 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
rc = p.wait()
if not ignore_errors and rc != expected_rc:
- raise AnsibleLookupError(to_text(err))
+ raise AnsibleLookupError(str(err))
return rc, out, err
@@ -210,12 +202,12 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
try:
bin_path = get_bin_path(cls.bin)
except ValueError:
- raise AnsibleLookupError("Unable to locate '%s' command line tool" % cls.bin)
+ raise AnsibleLookupError(f"Unable to locate '{cls.bin}' command line tool")
try:
b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE)
except subprocess.CalledProcessError as cpe:
- raise AnsibleLookupError("Unable to get the op version: %s" % cpe)
+ raise AnsibleLookupError(f"Unable to get the op version: {cpe}")
return to_text(b_out).strip()
@@ -300,7 +292,7 @@ class OnePassCLIv1(OnePassCLIBase):
if self.account_id:
args.extend(["--account", self.account_id])
elif self.subdomain:
- account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ account = f"{self.subdomain}.{self.domain}"
args.extend(["--account", account])
rc, out, err = self._run(args, ignore_errors=True)
@@ -326,7 +318,7 @@ class OnePassCLIv1(OnePassCLIBase):
args = [
"signin",
- "{0}.{1}".format(self.subdomain, self.domain),
+ f"{self.subdomain}.{self.domain}",
to_bytes(self.username),
to_bytes(self.secret_key),
"--raw",
@@ -341,7 +333,7 @@ class OnePassCLIv1(OnePassCLIBase):
args.extend(["--account", self.account_id])
if vault is not None:
- args += ["--vault={0}".format(vault)]
+ args += [f"--vault={vault}"]
if token is not None:
args += [to_bytes("--session=") + token]
@@ -512,7 +504,7 @@ class OnePassCLIv2(OnePassCLIBase):
args = ["account", "list"]
if self.subdomain:
- account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ account = f"{self.subdomain}.{self.domain}"
args.extend(["--account", account])
rc, out, err = self._run(args)
@@ -525,7 +517,7 @@ class OnePassCLIv2(OnePassCLIBase):
if self.account_id:
args.extend(["--account", self.account_id])
elif self.subdomain:
- account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ account = f"{self.subdomain}.{self.domain}"
args.extend(["--account", account])
rc, out, err = self._run(args, ignore_errors=True)
@@ -545,7 +537,7 @@ class OnePassCLIv2(OnePassCLIBase):
args = [
"account", "add", "--raw",
- "--address", "{0}.{1}".format(self.subdomain, self.domain),
+ "--address", f"{self.subdomain}.{self.domain}",
"--email", to_bytes(self.username),
"--signin",
]
@@ -553,14 +545,12 @@ class OnePassCLIv2(OnePassCLIBase):
environment_update = {"OP_SECRET_KEY": self.secret_key}
return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update)
- def get_raw(self, item_id, vault=None, token=None):
- args = ["item", "get", item_id, "--format", "json"]
-
+ def _add_parameters_and_run(self, args, vault=None, token=None):
if self.account_id:
args.extend(["--account", self.account_id])
if vault is not None:
- args += ["--vault={0}".format(vault)]
+ args += [f"--vault={vault}"]
if self.connect_host and self.connect_token:
if vault is None:
@@ -582,6 +572,10 @@ class OnePassCLIv2(OnePassCLIBase):
return self._run(args)
+ def get_raw(self, item_id, vault=None, token=None):
+ args = ["item", "get", item_id, "--format", "json"]
+ return self._add_parameters_and_run(args, vault=vault, token=token)
+
def signin(self):
self._check_required_params(['master_password'])
@@ -627,7 +621,7 @@ class OnePass(object):
except TypeError as e:
raise AnsibleLookupError(e)
- raise AnsibleLookupError("op version %s is unsupported" % version)
+ raise AnsibleLookupError(f"op version {version} is unsupported")
def set_token(self):
if self._config.config_file_path and os.path.isfile(self._config.config_file_path):
diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py
index 789e51c35a..82ca790a31 100644
--- a/plugins/lookup/onepassword_doc.py
+++ b/plugins/lookup/onepassword_doc.py
@@ -6,68 +6,53 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword_doc
- author:
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility version 2 or later.
- short_description: Fetch documents stored in 1Password
- version_added: "8.1.0"
- description:
- - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password.
- notes:
- - The document contents are a string exactly as stored in 1Password.
- - This plugin requires C(op) version 2 or later.
+DOCUMENTATION = r"""
+name: onepassword_doc
+author:
+ - Sam Doran (@samdoran)
+requirements:
+ - C(op) 1Password command line utility version 2 or later.
+short_description: Fetch documents stored in 1Password
+version_added: "8.1.0"
+description:
+ - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password.
+notes:
+ - The document contents are a string exactly as stored in 1Password.
+ - This plugin requires C(op) version 2 or later.
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
-
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve a private key from 1Password
ansible.builtin.debug:
var: lookup('community.general.onepassword_doc', 'Private key')
"""
-RETURN = """
- _raw:
- description: Requested document
- type: list
- elements: string
+RETURN = r"""
+_raw:
+ description: Requested document.
+ type: list
+ elements: string
"""
from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2
-from ansible.errors import AnsibleLookupError
-from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.lookup import LookupBase
class OnePassCLIv2Doc(OnePassCLIv2):
def get_raw(self, item_id, vault=None, token=None):
args = ["document", "get", item_id]
- if vault is not None:
- args = [*args, "--vault={0}".format(vault)]
-
- if self.service_account_token:
- if vault is None:
- raise AnsibleLookupError("'vault' is required with 'service_account_token'")
-
- environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token}
- return self._run(args, environment_update=environment_update)
-
- if token is not None:
- args = [*args, to_bytes("--session=") + token]
-
- return self._run(args)
+ return self._add_parameters_and_run(args, vault=vault, token=token)
class LookupModule(LookupBase):
diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py
index dc3e590329..2d9829ec9d 100644
--- a/plugins/lookup/onepassword_raw.py
+++ b/plugins/lookup/onepassword_raw.py
@@ -8,35 +8,36 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword_raw
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility
- short_description: Fetch an entire item from 1Password
- description:
- - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password.
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
- account_id:
- version_added: 7.5.0
- domain:
- version_added: 6.0.0
- service_account_token:
- version_added: 7.1.0
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
+DOCUMENTATION = r"""
+name: onepassword_raw
+author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+requirements:
+ - C(op) 1Password command line utility
+short_description: Fetch an entire item from 1Password
+description:
+ - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password.
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
+ account_id:
+ version_added: 7.5.0
+ domain:
+ version_added: 6.0.0
+ service_account_token:
+ version_added: 7.1.0
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve all data about Wintermute
ansible.builtin.debug:
var: lookup('community.general.onepassword_raw', 'Wintermute')
@@ -46,11 +47,11 @@ EXAMPLES = """
var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
"""
-RETURN = """
- _raw:
- description: Entire item requested.
- type: list
- elements: dict
+RETURN = r"""
+_raw:
+ description: Entire item requested.
+ type: list
+ elements: dict
"""
import json
diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py
new file mode 100644
index 0000000000..395de59f23
--- /dev/null
+++ b/plugins/lookup/onepassword_ssh_key.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: onepassword_ssh_key
+author:
+ - Mohammed Babelly (@mohammedbabelly20)
+requirements:
+ - C(op) 1Password command line utility version 2 or later.
+short_description: Fetch SSH keys stored in 1Password
+version_added: "10.3.0"
+description:
+ - P(community.general.onepassword_ssh_key#lookup) wraps C(op) command line utility to fetch SSH keys from 1Password.
+notes:
+ - By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed.
+ - The plugin works only for C(SSHKEY) type items.
+ - This plugin requires C(op) version 2 or later.
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
+ ssh_format:
+ description: Output key in SSH format if V(true). Otherwise, outputs in the default format (PKCS#8).
+ default: false
+ type: bool
+
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
+
+EXAMPLES = r"""
+---
+- name: Retrieve the private SSH key from 1Password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}"
+"""
+
+RETURN = r"""
+_raw:
+ description: Private key of SSH keypair.
+ type: list
+ elements: string
+"""
+import json
+
+from ansible_collections.community.general.plugins.lookup.onepassword import (
+ OnePass,
+ OnePassCLIv2,
+)
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def get_ssh_key(self, out, item_id, ssh_format=False):
+ data = json.loads(out)
+
+ if data.get("category") != "SSH_KEY":
+ raise AnsibleLookupError(f"Item {item_id} is not an SSH key")
+
+ private_key_field = next(
+ (
+ field
+ for field in data.get("fields", {})
+ if field.get("id") == "private_key" and field.get("type") == "SSHKEY"
+ ),
+ None,
+ )
+ if not private_key_field:
+ raise AnsibleLookupError(f"No private key found for item {item_id}.")
+
+ if ssh_format:
+ return (
+ private_key_field.get("ssh_formats", {})
+ .get("openssh", {})
+ .get("value", "")
+ )
+ return private_key_field.get("value", "")
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ ssh_format = self.get_option("ssh_format")
+ vault = self.get_option("vault")
+ subdomain = self.get_option("subdomain")
+ domain = self.get_option("domain", "1password.com")
+ username = self.get_option("username")
+ secret_key = self.get_option("secret_key")
+ master_password = self.get_option("master_password")
+ service_account_token = self.get_option("service_account_token")
+ account_id = self.get_option("account_id")
+ connect_host = self.get_option("connect_host")
+ connect_token = self.get_option("connect_token")
+
+ op = OnePass(
+ subdomain=subdomain,
+ domain=domain,
+ username=username,
+ secret_key=secret_key,
+ master_password=master_password,
+ service_account_token=service_account_token,
+ account_id=account_id,
+ connect_host=connect_host,
+ connect_token=connect_token,
+ cli_class=OnePassCLIv2,
+ )
+ op.assert_logged_in()
+
+ return [
+ self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format)
+ for term in terms
+ ]
diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py
index 06c476f8b3..8f87e87034 100644
--- a/plugins/lookup/passwordstore.py
+++ b/plugins/lookup/passwordstore.py
@@ -7,167 +7,168 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: passwordstore
- author:
- - Patrick Deelman (!UNKNOWN)
- short_description: manage passwords with passwordstore.org's pass utility
+DOCUMENTATION = r"""
+name: passwordstore
+author:
+ - Patrick Deelman (!UNKNOWN)
+short_description: Manage passwords with passwordstore.org's pass utility
+description:
+ - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve,
+ create or update YAML style keys stored as multilines in the passwordfile.
+ - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where
+ this is not possible, consider using O(lock=readwrite) instead.
+options:
+ _terms:
+ description: Query key.
+ required: true
+ directory:
description:
- - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
- It can also retrieve, create or update YAML style keys stored as multilines in the passwordfile.
- - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to
- C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead.
- options:
- _terms:
- description: query key.
- required: true
- directory:
- description:
- - The directory of the password store.
- - If O(backend=pass), the default is V(~/.password-store) is used.
- - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
- falling back to V(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
- type: path
- vars:
- - name: passwordstore
- env:
- - name: PASSWORD_STORE_DIR
- create:
- description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing).
- type: bool
- default: false
- overwrite:
- description: Overwrite the password or the subkey if it does already exist.
- type: bool
- default: false
- umask:
- description:
- - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable).
- - Note pass' default value is V('077').
- type: string
- env:
- - name: PASSWORD_STORE_UMASK
- version_added: 1.3.0
- returnall:
- description: Return all the content of the password, not only the first line.
- type: bool
- default: false
- subkey:
- description:
- - By default return a specific subkey of the password. When set to V(password), always returns the first line.
- - With O(overwrite=true), it will create the subkey and return it.
- type: str
- default: password
- userpass:
- description: Specify a password to save, instead of a generated one.
- type: str
- length:
- description: The length of the generated password.
- type: integer
- default: 16
- backup:
- description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey.
- type: bool
- default: false
- nosymbols:
- description: Use alphanumeric characters.
- type: bool
- default: false
- missing:
- description:
- - List of preference about what to do if the password file is missing.
- - If O(create=true), the value for this option is ignored and assumed to be V(create).
- - If set to V(error), the lookup will error out if the passname does not exist.
- - If set to V(create), the passname will be created with the provided length O(length) if it does not exist.
- - If set to V(empty) or V(warn), will return a V(none) in case the passname does not exist.
- When using C(lookup) and not C(query), this will be translated to an empty string.
- version_added: 3.1.0
- type: str
- default: error
- choices:
- - error
- - warn
- - empty
- - create
- lock:
- description:
- - How to synchronize operations.
- - The default of V(write) only synchronizes write operations.
- - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
- - V(none) does not do any synchronization.
- ini:
- - section: passwordstore_lookup
- key: lock
- type: str
- default: write
- choices:
- - readwrite
- - write
- - none
- version_added: 4.5.0
- locktimeout:
- description:
- - Lock timeout applied when O(lock) is not V(none).
- - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals V(15m).
- - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
- ini:
- - section: passwordstore_lookup
- key: locktimeout
- type: str
- default: 15m
- version_added: 4.5.0
- backend:
- description:
- - Specify which backend to use.
- - Defaults to V(pass), passwordstore.org's original pass utility.
- - V(gopass) support is incomplete.
- ini:
- - section: passwordstore_lookup
- key: backend
- vars:
- - name: passwordstore_backend
- type: str
- default: pass
- choices:
- - pass
- - gopass
- version_added: 5.2.0
- timestamp:
- description: Add the password generation information to the end of the file.
- type: bool
- default: true
- version_added: 8.1.0
- preserve:
- description: Include the old (edited) password inside the pass file.
- type: bool
- default: true
- version_added: 8.1.0
- missing_subkey:
- description:
- - Preference about what to do if the password subkey is missing.
- - If set to V(error), the lookup will error out if the subkey does not exist.
- - If set to V(empty) or V(warn), will return a V(none) in case the subkey does not exist.
- version_added: 8.6.0
- type: str
- default: empty
- choices:
- - error
- - warn
- - empty
- ini:
- - section: passwordstore_lookup
- key: missing_subkey
- notes:
- - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
-'''
-EXAMPLES = """
+ - The directory of the password store.
+ - If O(backend=pass), the default V(~/.password-store) is used.
+ - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root)
+ if C(path) is not defined in the gopass config.
+ type: path
+ vars:
+ - name: passwordstore
+ env:
+ - name: PASSWORD_STORE_DIR
+ create:
+ description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing).
+ type: bool
+ default: false
+ overwrite:
+ description: Overwrite the password or the subkey if it does already exist.
+ type: bool
+ default: false
+ umask:
+ description:
+ - Sets the umask for the created V(.gpg) files. The first octet must be greater than 3 (user readable).
+ - Note pass' default value is V('077').
+ type: string
+ env:
+ - name: PASSWORD_STORE_UMASK
+ version_added: 1.3.0
+ returnall:
+ description: Return all the content of the password, not only the first line.
+ type: bool
+ default: false
+ subkey:
+ description:
+ - By default return a specific subkey of the password. When set to V(password), always returns the first line.
+ - With O(overwrite=true), it creates the subkey and returns it.
+ type: str
+ default: password
+ userpass:
+ description: Specify a password to save, instead of a generated one.
+ type: str
+ length:
+ description: The length of the generated password.
+ type: integer
+ default: 16
+ backup:
+ description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey.
+ type: bool
+ default: false
+ nosymbols:
+ description: Use alphanumeric characters.
+ type: bool
+ default: false
+ missing:
+ description:
+ - List of preference about what to do if the password file is missing.
+ - If O(create=true), the value for this option is ignored and assumed to be V(create).
+ - If set to V(error), the lookup fails out if the passname does not exist.
+ - If set to V(create), the passname is created with the provided length O(length) if it does not exist.
+ - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and
+ not C(query), this is translated to an empty string.
+ version_added: 3.1.0
+ type: str
+ default: error
+ choices:
+ - error
+ - warn
+ - empty
+ - create
+ lock:
+ description:
+ - How to synchronize operations.
+ - The default of V(write) only synchronizes write operations.
+ - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
+ - V(none) does not do any synchronization.
+ ini:
+ - section: passwordstore_lookup
+ key: lock
+ type: str
+ default: write
+ choices:
+ - readwrite
+ - write
+ - none
+ version_added: 4.5.0
+ locktimeout:
+ description:
+ - Lock timeout applied when O(lock) is not V(none).
+ - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals
+ V(15m).
+ - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
+ ini:
+ - section: passwordstore_lookup
+ key: locktimeout
+ type: str
+ default: 15m
+ version_added: 4.5.0
+ backend:
+ description:
+ - Specify which backend to use.
+ - Defaults to V(pass), passwordstore.org's original pass utility.
+ - V(gopass) support is incomplete.
+ ini:
+ - section: passwordstore_lookup
+ key: backend
+ vars:
+ - name: passwordstore_backend
+ type: str
+ default: pass
+ choices:
+ - pass
+ - gopass
+ version_added: 5.2.0
+ timestamp:
+ description: Add the password generation information to the end of the file.
+ type: bool
+ default: true
+ version_added: 8.1.0
+ preserve:
+ description: Include the old (edited) password inside the pass file.
+ type: bool
+ default: true
+ version_added: 8.1.0
+ missing_subkey:
+ description:
+ - Preference about what to do if the password subkey is missing.
+ - If set to V(error), the lookup fails out if the subkey does not exist.
+ - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist.
+ version_added: 8.6.0
+ type: str
+ default: empty
+ choices:
+ - error
+ - warn
+ - empty
+ ini:
+ - section: passwordstore_lookup
+ key: missing_subkey
+notes:
+ - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
+"""
+EXAMPLES = r"""
ansible.cfg: |
[passwordstore_lookup]
lock=readwrite
locktimeout=45s
missing_subkey=warn
-tasks.yml: |
+tasks.yml: |-
---
# Debug is used for examples, BAD IDEA to show passwords on screen
@@ -233,10 +234,10 @@ tasks.yml: |
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}"
"""
-RETURN = """
+RETURN = r"""
_raw:
description:
- - a password
+ - A password.
type: list
elements: str
"""
@@ -315,7 +316,7 @@ class LookupModule(LookupBase):
)
self.realpass = 'pass: the standard unix password manager' in passoutput
except (subprocess.CalledProcessError) as e:
- raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}')
return self.realpass
@@ -332,7 +333,7 @@ class LookupModule(LookupBase):
for param in params[1:]:
name, value = param.split('=', 1)
if name not in self.paramvals:
- raise AnsibleAssertionError('%s not in paramvals' % name)
+ raise AnsibleAssertionError(f'{name} not in paramvals')
self.paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
@@ -344,12 +345,12 @@ class LookupModule(LookupBase):
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
- raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing']))
+ raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing")
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
- raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
+ raise AnsibleError(f"{self.paramvals['length']} is not a correct value for length")
if self.paramvals['create']:
self.paramvals['missing'] = 'create'
@@ -364,7 +365,7 @@ class LookupModule(LookupBase):
# Set PASSWORD_STORE_DIR
self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
elif self.is_real_pass():
- raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
+ raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist")
# Set PASSWORD_STORE_UMASK if umask is set
if self.paramvals.get('umask') is not None:
@@ -394,19 +395,19 @@ class LookupModule(LookupBase):
name, value = line.split(':', 1)
self.passdict[name.strip()] = value.strip()
if (self.backend == 'gopass' or
- os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg"))
+ os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg"))
or not self.is_real_pass()):
# When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise)
return True
except (subprocess.CalledProcessError) as e:
# 'not in password store' is the expected error if a password wasn't found
if 'not in the password store' not in e.output:
- raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}')
if self.paramvals['missing'] == 'error':
- raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname))
+ raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set')
elif self.paramvals['missing'] == 'warn':
- display.warning('passwordstore: passname {0} not found'.format(self.passname))
+ display.warning(f'passwordstore: passname {self.passname} not found')
return False
@@ -433,11 +434,11 @@ class LookupModule(LookupBase):
msg_lines = []
subkey_exists = False
- subkey_line = "{0}: {1}".format(subkey, newpass)
+ subkey_line = f"{subkey}: {newpass}"
oldpass = None
for line in self.passoutput:
- if line.startswith("{0}: ".format(subkey)):
+ if line.startswith(f"{subkey}: "):
oldpass = self.passdict[subkey]
line = subkey_line
subkey_exists = True
@@ -449,9 +450,7 @@ class LookupModule(LookupBase):
if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass:
msg_lines.append(
- "lookup_pass: old subkey '{0}' password was {1} (Updated on {2})\n".format(
- subkey, oldpass, datetime
- )
+ f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n"
)
msg = os.linesep.join(msg_lines)
@@ -464,12 +463,12 @@ class LookupModule(LookupBase):
if self.paramvals['preserve'] and self.passoutput[1:]:
msg += '\n'.join(self.passoutput[1:]) + '\n'
if self.paramvals['timestamp'] and self.paramvals['backup']:
- msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+ msg += f"lookup_pass: old password was {self.password} (Updated on {datetime})\n"
try:
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
- raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}')
return newpass
def generate_password(self):
@@ -480,17 +479,17 @@ class LookupModule(LookupBase):
subkey = self.paramvals["subkey"]
if subkey != "password":
- msg = "\n\n{0}: {1}".format(subkey, newpass)
+ msg = f"\n\n{subkey}: {newpass}"
else:
msg = newpass
if self.paramvals['timestamp']:
- msg += '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+ msg += f"\nlookup_pass: First generated by ansible on {datetime}\n"
try:
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
- raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}')
return newpass
@@ -505,16 +504,12 @@ class LookupModule(LookupBase):
else:
if self.paramvals["missing_subkey"] == "error":
raise AnsibleError(
- "passwordstore: subkey {0} for passname {1} not found and missing_subkey=error is set".format(
- self.paramvals["subkey"], self.passname
- )
+ f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found and missing_subkey=error is set"
)
if self.paramvals["missing_subkey"] == "warn":
display.warning(
- "passwordstore: subkey {0} for passname {1} not found".format(
- self.paramvals["subkey"], self.passname
- )
+ f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found"
)
return None
@@ -524,7 +519,7 @@ class LookupModule(LookupBase):
if self.get_option('lock') == type:
tmpdir = os.environ.get('TMPDIR', '/tmp')
user = os.environ.get('USER')
- lockfile = os.path.join(tmpdir, '.{0}.passwordstore.lock'.format(user))
+ lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock')
with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout):
self.locked = type
yield
@@ -538,7 +533,7 @@ class LookupModule(LookupBase):
self.locked = None
timeout = self.get_option('locktimeout')
if not re.match('^[0-9]+[smh]$', timeout):
- raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout))
+ raise AnsibleError(f"{timeout} is not a correct value for locktimeout")
unit_to_seconds = {"s": 1, "m": 60, "h": 3600}
self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]]
diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py
index 71a62cbca0..8f9b3cbd00 100644
--- a/plugins/lookup/random_pet.py
+++ b/plugins/lookup/random_pet.py
@@ -8,38 +8,38 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
- name: random_pet
- author:
- - Abhijeet Kasurde (@Akasurde)
- short_description: Generates random pet names
- version_added: '3.1.0'
- requirements:
- - petname U(https://github.com/dustinkirkland/python-petname)
+DOCUMENTATION = r"""
+name: random_pet
+author:
+ - Abhijeet Kasurde (@Akasurde)
+short_description: Generates random pet names
+version_added: '3.1.0'
+requirements:
+ - petname U(https://github.com/dustinkirkland/python-petname)
+description:
+ - Generates random pet names that can be used as unique identifiers for the resources.
+options:
+ words:
description:
- - Generates random pet names that can be used as unique identifiers for the resources.
- options:
- words:
- description:
- - The number of words in the pet name.
- default: 2
- type: int
- length:
- description:
- - The maximal length of every component of the pet name.
- - Values below 3 will be set to 3 by petname.
- default: 6
- type: int
- prefix:
- description: A string to prefix with the name.
- type: str
- separator:
- description: The character to separate words in the pet name.
- default: "-"
- type: str
-'''
+ - The number of words in the pet name.
+ default: 2
+ type: int
+ length:
+ description:
+ - The maximal length of every component of the pet name.
+ - Values below V(3) are set to V(3) by petname.
+ default: 6
+ type: int
+ prefix:
+ description: A string to prefix with the name.
+ type: str
+ separator:
+ description: The character to separate words in the pet name.
+ default: "-"
+ type: str
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Generate pet name
ansible.builtin.debug:
var: lookup('community.general.random_pet')
@@ -59,14 +59,14 @@ EXAMPLES = r'''
ansible.builtin.debug:
var: lookup('community.general.random_pet', length=7)
# Example result: 'natural-peacock'
-'''
+"""
-RETURN = r'''
- _raw:
- description: A one-element list containing a random pet name
- type: list
- elements: str
-'''
+RETURN = r"""
+_raw:
+ description: A one-element list containing a random pet name.
+ type: list
+ elements: str
+"""
try:
import petname
@@ -95,6 +95,6 @@ class LookupModule(LookupBase):
values = petname.Generate(words=words, separator=separator, letters=length)
if prefix:
- values = "%s%s%s" % (prefix, separator, values)
+ values = f"{prefix}{separator}{values}"
return [values]
diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py
index 9b811dd8b3..4b227d3dca 100644
--- a/plugins/lookup/random_string.py
+++ b/plugins/lookup/random_string.py
@@ -9,95 +9,94 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
- name: random_string
- author:
- - Abhijeet Kasurde (@Akasurde)
- short_description: Generates random string
- version_added: '3.2.0'
+name: random_string
+author:
+ - Abhijeet Kasurde (@Akasurde)
+short_description: Generates random string
+version_added: '3.2.0'
+description:
+ - Generates random string based upon the given constraints.
+ - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom), so should be strong enough
+ for cryptographic purposes.
+options:
+ length:
+ description: The length of the string.
+ default: 8
+ type: int
+ upper:
description:
- - Generates random string based upon the given constraints.
- - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom),
- so should be strong enough for cryptographic purposes.
- options:
- length:
- description: The length of the string.
- default: 8
- type: int
- upper:
- description:
- - Include uppercase letters in the string.
- default: true
- type: bool
- lower:
- description:
- - Include lowercase letters in the string.
- default: true
- type: bool
- numbers:
- description:
- - Include numbers in the string.
- default: true
- type: bool
- special:
- description:
- - Include special characters in the string.
- - Special characters are taken from Python standard library C(string).
- See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
- for which characters will be used.
- - The choice of special characters can be changed to setting O(override_special).
- default: true
- type: bool
- min_numeric:
- description:
- - Minimum number of numeric characters in the string.
- - If set, overrides O(numbers=false).
- default: 0
- type: int
- min_upper:
- description:
- - Minimum number of uppercase alphabets in the string.
- - If set, overrides O(upper=false).
- default: 0
- type: int
- min_lower:
- description:
- - Minimum number of lowercase alphabets in the string.
- - If set, overrides O(lower=false).
- default: 0
- type: int
- min_special:
- description:
- - Minimum number of special character in the string.
- default: 0
- type: int
- override_special:
- description:
- - Override a list of special characters to use in the string.
- - If set O(min_special) should be set to a non-default value.
- type: str
- override_all:
- description:
- - Override all values of O(numbers), O(upper), O(lower), and O(special) with
- the given list of characters.
- type: str
- ignore_similar_chars:
- description:
- - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
- - These characters can be configured in O(similar_chars).
- default: false
- type: bool
- version_added: 7.5.0
- similar_chars:
- description:
- - Override a list of characters not to be use in the string.
- default: "il1LoO0"
- type: str
- version_added: 7.5.0
- base64:
- description:
- - Returns base64 encoded string.
- type: bool
- default: false
+ - Include uppercase letters in the string.
+ default: true
+ type: bool
+ lower:
+ description:
+ - Include lowercase letters in the string.
+ default: true
+ type: bool
+ numbers:
+ description:
+ - Include numbers in the string.
+ default: true
+ type: bool
+ special:
+ description:
+ - Include special characters in the string.
+ - Special characters are taken from Python standard library C(string). See L(the documentation of
+ string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
+ for which characters are used.
+ - The choice of special characters can be changed to setting O(override_special).
+ default: true
+ type: bool
+ min_numeric:
+ description:
+ - Minimum number of numeric characters in the string.
+ - If set, overrides O(numbers=false).
+ default: 0
+ type: int
+ min_upper:
+ description:
+ - Minimum number of uppercase alphabets in the string.
+ - If set, overrides O(upper=false).
+ default: 0
+ type: int
+ min_lower:
+ description:
+ - Minimum number of lowercase alphabets in the string.
+ - If set, overrides O(lower=false).
+ default: 0
+ type: int
+ min_special:
+ description:
+ - Minimum number of special characters in the string.
+ default: 0
+ type: int
+ override_special:
+ description:
+ - Override a list of special characters to use in the string.
+ - If set O(min_special) should be set to a non-default value.
+ type: str
+ override_all:
+ description:
+ - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters.
+ type: str
+ ignore_similar_chars:
+ description:
+ - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
+ - These characters can be configured in O(similar_chars).
+ default: false
+ type: bool
+ version_added: 7.5.0
+ similar_chars:
+ description:
+ - Override a list of characters not to be used in the string.
+ default: "il1LoO0"
+ type: str
+ version_added: 7.5.0
+ base64:
+ description:
+ - Returns base64 encoded string.
+ type: bool
+ default: false
"""
EXAMPLES = r"""
@@ -142,10 +141,10 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: A one-element list containing a random string
- type: list
- elements: str
+_raw:
+ description: A one-element list containing a random string.
+ type: list
+ elements: str
"""
import base64
diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py
index a4aa1b3178..247871dba0 100644
--- a/plugins/lookup/random_words.py
+++ b/plugins/lookup/random_words.py
@@ -10,44 +10,43 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
- name: random_words
- author:
- - Thomas Sjögren (@konstruktoid)
- short_description: Return a number of random words
- version_added: "4.0.0"
- requirements:
- - xkcdpass U(https://github.com/redacted/XKCD-password-generator)
+name: random_words
+author:
+ - Thomas Sjögren (@konstruktoid)
+short_description: Return a number of random words
+version_added: "4.0.0"
+requirements:
+ - xkcdpass U(https://github.com/redacted/XKCD-password-generator)
+description:
+ - Returns a number of random words. The output can for example be used for passwords.
+ - See U(https://xkcd.com/936/) for background.
+options:
+ numwords:
description:
- - Returns a number of random words. The output can for example be used for
- passwords.
- - See U(https://xkcd.com/936/) for background.
- options:
- numwords:
- description:
- - The number of words.
- default: 6
- type: int
- min_length:
- description:
- - Minimum length of words to make password.
- default: 5
- type: int
- max_length:
- description:
- - Maximum length of words to make password.
- default: 9
- type: int
- delimiter:
- description:
- - The delimiter character between words.
- default: " "
- type: str
- case:
- description:
- - The method for setting the case of each word in the passphrase.
- choices: ["alternating", "upper", "lower", "random", "capitalize"]
- default: "lower"
- type: str
+ - The number of words.
+ default: 6
+ type: int
+ min_length:
+ description:
+ - Minimum length of words to make password.
+ default: 5
+ type: int
+ max_length:
+ description:
+ - Maximum length of words to make password.
+ default: 9
+ type: int
+ delimiter:
+ description:
+ - The delimiter character between words.
+ default: " "
+ type: str
+ case:
+ description:
+ - The method for setting the case of each word in the passphrase.
+ choices: ["alternating", "upper", "lower", "random", "capitalize"]
+ default: "lower"
+ type: str
"""
EXAMPLES = r"""
@@ -74,10 +73,10 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: A single-element list containing random words.
- type: list
- elements: str
+_raw:
+ description: A single-element list containing random words.
+ type: list
+ elements: str
"""
from ansible.errors import AnsibleLookupError
diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py
index 17cbf120e9..bb5a122da3 100644
--- a/plugins/lookup/redis.py
+++ b/plugins/lookup/redis.py
@@ -6,50 +6,50 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: redis
- author:
- - Jan-Piet Mens (@jpmens)
- - Ansible Core Team
- short_description: fetch data from Redis
- description:
- - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
- requirements:
- - redis (python library https://github.com/andymccurdy/redis-py/)
- options:
- _terms:
- description: list of keys to query
- type: list
- elements: string
- host:
- description: location of Redis host
- type: string
- default: '127.0.0.1'
- env:
- - name: ANSIBLE_REDIS_HOST
- ini:
- - section: lookup_redis
- key: host
- port:
- description: port on which Redis is listening on
- default: 6379
- type: int
- env:
- - name: ANSIBLE_REDIS_PORT
- ini:
- - section: lookup_redis
- key: port
- socket:
- description: path to socket on which to query Redis, this option overrides host and port options when set.
- type: path
- env:
- - name: ANSIBLE_REDIS_SOCKET
- ini:
- - section: lookup_redis
- key: socket
-'''
+DOCUMENTATION = r"""
+name: redis
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ansible Core Team
+short_description: Fetch data from Redis
+description:
+ - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it.
+requirements:
+ - redis (python library https://github.com/andymccurdy/redis-py/)
+options:
+ _terms:
+ description: List of keys to query.
+ type: list
+ elements: string
+ host:
+ description: Location of Redis host.
+ type: string
+ default: '127.0.0.1'
+ env:
+ - name: ANSIBLE_REDIS_HOST
+ ini:
+ - section: lookup_redis
+ key: host
+ port:
+ description: Port on which Redis is listening on.
+ default: 6379
+ type: int
+ env:
+ - name: ANSIBLE_REDIS_PORT
+ ini:
+ - section: lookup_redis
+ key: port
+ socket:
+ description: Path to socket on which to query Redis, this option overrides host and port options when set.
+ type: path
+ env:
+ - name: ANSIBLE_REDIS_SOCKET
+ ini:
+ - section: lookup_redis
+ key: socket
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: query redis for somekey (default or configured settings used)
ansible.builtin.debug:
msg: "{{ lookup('community.general.redis', 'somekey') }}"
@@ -66,12 +66,11 @@ EXAMPLES = """
- name: use list directly with a socket
ansible.builtin.debug:
msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
-
"""
-RETURN = """
+RETURN = r"""
_raw:
- description: value(s) stored in Redis
+ description: Value(s) stored in Redis.
type: list
elements: str
"""
@@ -116,5 +115,5 @@ class LookupModule(LookupBase):
ret.append(to_text(res))
except Exception as e:
# connection failed or key not found
- raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ raise AnsibleError(f'Encountered exception while fetching {term}: {e}')
return ret
diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py
index e4118e89eb..6b31963f4a 100644
--- a/plugins/lookup/revbitspss.py
+++ b/plugins/lookup/revbitspss.py
@@ -12,54 +12,55 @@ author: RevBits (@RevBits)
short_description: Get secrets from RevBits PAM server
version_added: 4.1.0
description:
- - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
- Server using API key authentication with the REST API.
+ - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST
+ API.
requirements:
- - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
+ - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
options:
- _terms:
- description:
- - This will be an array of keys for secrets which you want to fetch from RevBits PAM.
- required: true
- type: list
- elements: string
- base_url:
- description:
- - This will be the base URL of the server, for example V(https://server-url-here).
- required: true
- type: string
- api_key:
- description:
- - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
- required: true
- type: string
+ _terms:
+ description:
+ - This is an array of keys for secrets which you want to fetch from RevBits PAM.
+ required: true
+ type: list
+ elements: string
+ base_url:
+ description:
+ - This is the base URL of the server, for example V(https://server-url-here).
+ required: true
+ type: string
+ api_key:
+ description:
+ - This is the API key for authentication. You can get it from the RevBits PAM secret manager module.
+ required: true
+ type: string
"""
RETURN = r"""
_list:
- description:
- - The JSON responses which you can access with defined keys.
- - If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets.
- type: list
- elements: dict
+ description:
+ - The JSON responses which you can access with defined keys.
+ - If you are fetching secrets named as UUID, PASSWORD it returns the dict of all secrets.
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
+---
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.revbitspss',
- 'UUIDPAM', 'DB_PASS',
- base_url='https://server-url-here',
- api_key='API_KEY_GOES_HERE'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.revbitspss',
+ 'UUIDPAM', 'DB_PASS',
+ base_url='https://server-url-here',
+ api_key='API_KEY_GOES_HERE'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
+ - ansible.builtin.debug:
+ msg: >-
+ UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
"""
from ansible.plugins.lookup import LookupBase
@@ -100,8 +101,8 @@ class LookupModule(LookupBase):
result = []
for term in terms:
try:
- display.vvv("Secret Server lookup of Secret with ID %s" % term)
+ display.vvv(f"Secret Server lookup of Secret with ID {term}")
result.append({term: secret_server.get_pam_secret(term)})
except Exception as error:
- raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+ raise AnsibleError(f"Secret Server lookup failure: {error.message}")
return result
diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py
index 70d18338e9..f4142f67c6 100644
--- a/plugins/lookup/shelvefile.py
+++ b/plugins/lookup/shelvefile.py
@@ -6,34 +6,35 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: shelvefile
- author: Alejandro Guirao (!UNKNOWN)
- short_description: read keys from Python shelve file
- description:
- - Read keys from Python shelve file.
- options:
- _terms:
- description: Sets of key value pairs of parameters.
- type: list
- elements: str
- key:
- description: Key to query.
- type: str
- required: true
- file:
- description: Path to shelve file.
- type: path
- required: true
-'''
+DOCUMENTATION = r"""
+name: shelvefile
+author: Alejandro Guirao (!UNKNOWN)
+short_description: Read keys from Python shelve file
+description:
+ - Read keys from Python shelve file.
+options:
+ _terms:
+ description: Sets of key value pairs of parameters.
+ type: list
+ elements: str
+ key:
+ description: Key to query.
+ type: str
+ required: true
+ file:
+ description: Path to shelve file.
+ type: path
+ required: true
+"""
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve a string value corresponding to a key inside a Python shelve file
ansible.builtin.debug:
msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
"""
-RETURN = """
+RETURN = r"""
_list:
description: Value(s) of key(s) in shelve file(s).
type: list
@@ -71,7 +72,7 @@ class LookupModule(LookupBase):
for param in params:
name, value = param.split('=')
if name not in paramvals:
- raise AnsibleAssertionError('%s not in paramvals' % name)
+ raise AnsibleAssertionError(f'{name} not in paramvals')
paramvals[name] = value
except (ValueError, AssertionError) as e:
@@ -86,11 +87,11 @@ class LookupModule(LookupBase):
if shelvefile:
res = self.read_shelve(shelvefile, key)
if res is None:
- raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile))
+ raise AnsibleError(f"Key {key} not found in shelve file {shelvefile}")
# Convert the value read to string
ret.append(to_text(res))
break
else:
- raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file'])
+ raise AnsibleError(f"Could not locate shelve file in lookup: {paramvals['file']}")
return ret
diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py
index f2d79ed168..3d73fcbe99 100644
--- a/plugins/lookup/tss.py
+++ b/plugins/lookup/tss.py
@@ -12,200 +12,196 @@ author: Adam Migus (@amigus)
short_description: Get secrets from Thycotic Secret Server
version_added: 1.0.0
description:
- - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
- Server using token authentication with O(username) and O(password) on
- the REST API at O(base_url).
- - When using self-signed certificates the environment variable
- E(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
- (in C(.pem) format).
- - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username)
+ and O(password) on the REST API at O(base_url).
+ - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the
+ trusted certificates (in C(.pem) format).
+ - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
requirements:
- - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
+ - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
options:
- _terms:
- description: The integer ID of the secret.
- required: true
- type: list
- elements: int
- secret_path:
- description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0.
- required: false
- type: str
- version_added: 7.2.0
- fetch_secret_ids_from_folder:
- description:
- - Boolean flag which indicates whether secret ids are in a folder is fetched by folder ID or not.
- - V(true) then the terms will be considered as a folder IDs. Otherwise (default), they are considered as secret IDs.
- required: false
- type: bool
- version_added: 7.1.0
- fetch_attachments:
- description:
- - Boolean flag which indicates whether attached files will get downloaded or not.
- - The download will only happen if O(file_download_path) has been provided.
- required: false
- type: bool
- version_added: 7.0.0
- file_download_path:
- description: Indicate the file attachment download location.
- required: false
- type: path
- version_added: 7.0.0
- base_url:
- description: The base URL of the server, for example V(https://localhost/SecretServer).
- type: string
- env:
- - name: TSS_BASE_URL
- ini:
- - section: tss_lookup
- key: base_url
- required: true
- username:
- description: The username with which to request the OAuth2 Access Grant.
- type: string
- env:
- - name: TSS_USERNAME
- ini:
- - section: tss_lookup
- key: username
- password:
- description:
- - The password associated with the supplied username.
- - Required when O(token) is not provided.
- type: string
- env:
- - name: TSS_PASSWORD
- ini:
- - section: tss_lookup
- key: password
- domain:
- default: ""
- description:
- - The domain with which to request the OAuth2 Access Grant.
- - Optional when O(token) is not provided.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- type: string
- env:
- - name: TSS_DOMAIN
- ini:
- - section: tss_lookup
- key: domain
- required: false
- version_added: 3.6.0
- token:
- description:
- - Existing token for Thycotic authorizer.
- - If provided, O(username) and O(password) are not needed.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- type: string
- env:
- - name: TSS_TOKEN
- ini:
- - section: tss_lookup
- key: token
- version_added: 3.7.0
- api_path_uri:
- default: /api/v1
- description: The path to append to the base URL to form a valid REST
- API request.
- type: string
- env:
- - name: TSS_API_PATH_URI
- required: false
- token_path_uri:
- default: /oauth2/token
- description: The path to append to the base URL to form a valid OAuth2
- Access Grant request.
- type: string
- env:
- - name: TSS_TOKEN_PATH_URI
- required: false
+ _terms:
+ description: The integer ID of the secret.
+ required: true
+ type: list
+ elements: int
+ secret_path:
+ description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0.
+ required: false
+ type: str
+ version_added: 7.2.0
+ fetch_secret_ids_from_folder:
+ description:
+ - Boolean flag which indicates whether secret IDs are in a folder is fetched by folder ID or not.
+ - V(true) then the terms are considered as a folder IDs. Otherwise (default), they are considered as secret IDs.
+ required: false
+ type: bool
+ version_added: 7.1.0
+ fetch_attachments:
+ description:
+ - Boolean flag which indicates whether attached files are downloaded or not.
+ - The download only happens if O(file_download_path) has been provided.
+ required: false
+ type: bool
+ version_added: 7.0.0
+ file_download_path:
+ description: Indicate the file attachment download location.
+ required: false
+ type: path
+ version_added: 7.0.0
+ base_url:
+ description: The base URL of the server, for example V(https://localhost/SecretServer).
+ type: string
+ env:
+ - name: TSS_BASE_URL
+ ini:
+ - section: tss_lookup
+ key: base_url
+ required: true
+ username:
+ description: The username with which to request the OAuth2 Access Grant.
+ type: string
+ env:
+ - name: TSS_USERNAME
+ ini:
+ - section: tss_lookup
+ key: username
+ password:
+ description:
+ - The password associated with the supplied username.
+ - Required when O(token) is not provided.
+ type: string
+ env:
+ - name: TSS_PASSWORD
+ ini:
+ - section: tss_lookup
+ key: password
+ domain:
+ default: ""
+ description:
+ - The domain with which to request the OAuth2 Access Grant.
+ - Optional when O(token) is not provided.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ type: string
+ env:
+ - name: TSS_DOMAIN
+ ini:
+ - section: tss_lookup
+ key: domain
+ required: false
+ version_added: 3.6.0
+ token:
+ description:
+ - Existing token for Thycotic authorizer.
+ - If provided, O(username) and O(password) are not needed.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ type: string
+ env:
+ - name: TSS_TOKEN
+ ini:
+ - section: tss_lookup
+ key: token
+ version_added: 3.7.0
+ api_path_uri:
+ default: /api/v1
+ description: The path to append to the base URL to form a valid REST API request.
+ type: string
+ env:
+ - name: TSS_API_PATH_URI
+ required: false
+ token_path_uri:
+ default: /oauth2/token
+ description: The path to append to the base URL to form a valid OAuth2 Access Grant request.
+ type: string
+ env:
+ - name: TSS_TOKEN_PATH_URI
+ required: false
"""
RETURN = r"""
_list:
- description:
- - The JSON responses to C(GET /secrets/{id}).
- - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
- type: list
- elements: dict
+ description:
+ - The JSON responses to C(GET /secrets/{id}).
+ - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password',
- domain='domain'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password',
+ domain='domain'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
- hosts: localhost
vars:
- secret_password: >-
- {{
- ((lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token',
- ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
- }}
+ secret_password: >-
+ {{
+ ((lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token',
+ ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
+ }}
tasks:
- - ansible.builtin.debug:
- msg: the password is {{ secret_password }}
+ - ansible.builtin.debug:
+ msg: the password is {{ secret_password }}
# Private key stores into certificate file which is attached with secret.
# If fetch_attachments=True then private key file will be download on specified path
# and file content will display in debug message.
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- fetch_attachments=True,
- file_download_path='/home/certs',
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_attachments=True,
+ file_download_path='/home/certs',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
tasks:
- ansible.builtin.debug:
msg: >
@@ -218,16 +214,16 @@ EXAMPLES = r"""
# If fetch_secret_ids_from_folder=true then secret IDs are in a folder is fetched based on folder ID
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- fetch_secret_ids_from_folder=true,
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_secret_ids_from_folder=true,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
tasks:
- ansible.builtin.debug:
msg: >
@@ -238,25 +234,25 @@ EXAMPLES = r"""
# If secret ID is 0 and secret_path has value then secret is fetched by secret path
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 0,
- secret_path='\folderName\secretName'
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 0,
+ secret_path='\folderName\secretName'
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >-
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
"""
import abc
@@ -306,14 +302,14 @@ class TSSClient(object):
return TSSClientV0(**server_parameters)
def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path):
- display.debug("tss_lookup term: %s" % term)
+ display.debug(f"tss_lookup term: {term}")
secret_id = self._term_to_secret_id(term)
if secret_id == 0 and secret_path:
fetch_secret_by_path = True
- display.vvv(u"Secret Server lookup of Secret with path %s" % secret_path)
+ display.vvv(f"Secret Server lookup of Secret with path {secret_path}")
else:
fetch_secret_by_path = False
- display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
+ display.vvv(f"Secret Server lookup of Secret with ID {secret_id}")
if fetch_file_attachments:
if fetch_secret_by_path:
@@ -325,12 +321,12 @@ class TSSClient(object):
if i['isFile']:
try:
file_content = i['itemValue'].content
- with open(os.path.join(file_download_path, str(obj['id']) + "_" + i['slug']), "wb") as f:
+ with open(os.path.join(file_download_path, f"{obj['id']}_{i['slug']}"), "wb") as f:
f.write(file_content)
except ValueError:
- raise AnsibleOptionsError("Failed to download {0}".format(str(i['slug'])))
+ raise AnsibleOptionsError(f"Failed to download {i['slug']}")
except AttributeError:
- display.warning("Could not read file content for {0}".format(str(i['slug'])))
+ display.warning(f"Could not read file content for {i['slug']}")
finally:
i['itemValue'] = "*** Not Valid For Display ***"
else:
@@ -343,9 +339,9 @@ class TSSClient(object):
return self._client.get_secret_json(secret_id)
def get_secret_ids_by_folderid(self, term):
- display.debug("tss_lookup term: %s" % term)
+ display.debug(f"tss_lookup term: {term}")
folder_id = self._term_to_folder_id(term)
- display.vvv(u"Secret Server lookup of Secret id's with Folder ID %d" % folder_id)
+ display.vvv(f"Secret Server lookup of Secret id's with Folder ID {folder_id}")
return self._client.get_secret_ids_by_folderid(folder_id)
@@ -447,4 +443,4 @@ class LookupModule(LookupBase):
for term in terms
]
except SecretServerError as error:
- raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+ raise AnsibleError(f"Secret Server lookup failure: {error.message}")
diff --git a/plugins/module_utils/_filelock.py b/plugins/module_utils/_filelock.py
index a35d0b91cf..4e782064be 100644
--- a/plugins/module_utils/_filelock.py
+++ b/plugins/module_utils/_filelock.py
@@ -46,8 +46,8 @@ class FileLock:
'''
Create a lock file based on path with flock to prevent other processes
using given path.
- Please note that currently file locking only works when it's executed by
- the same user, I.E single user scenarios
+ Please note that currently file locking only works when it is executed by
+ the same user, for example single user scenarios
:kw path: Path (file) to lock
:kw tmpdir: Path where to place the temporary .lock file
diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py
new file mode 100644
index 0000000000..9cbb2df6b0
--- /dev/null
+++ b/plugins/module_utils/android_sdkmanager.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Stanislav Shamilov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+__state_map = {
+ "present": "--install",
+ "absent": "--uninstall"
+}
+
+# sdkmanager --help 2>&1 | grep -A 2 -- --channel
+__channel_map = {
+ "stable": 0,
+ "beta": 1,
+ "dev": 2,
+ "canary": 3
+}
+
+
+def __map_channel(channel_name):
+ if channel_name not in __channel_map:
+ raise ValueError("Unknown channel name '%s'" % channel_name)
+ return __channel_map[channel_name]
+
+
+def sdkmanager_runner(module, **kwargs):
+ return CmdRunner(
+ module,
+ command='sdkmanager',
+ arg_formats=dict(
+ state=cmd_runner_fmt.as_map(__state_map),
+ name=cmd_runner_fmt.as_list(),
+ installed=cmd_runner_fmt.as_fixed("--list_installed"),
+ list=cmd_runner_fmt.as_fixed('--list'),
+ newer=cmd_runner_fmt.as_fixed("--newer"),
+ sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"),
+ channel=cmd_runner_fmt.as_func(lambda x: ["{0}={1}".format("--channel", __map_channel(x))])
+ ),
+ force_lang="C.UTF-8", # Without this, sdkmanager binary crashes
+ **kwargs
+ )
+
+
+class Package:
+ def __init__(self, name):
+ self.name = name
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __ne__(self, other):
+ if not isinstance(other, Package):
+ return True
+ return self.name != other.name
+
+ def __eq__(self, other):
+ if not isinstance(other, Package):
+ return False
+
+ return self.name == other.name
+
+
+class SdkManagerException(Exception):
+ pass
+
+
+class AndroidSdkManager(object):
+ _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$')
+ _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$')
+
+ # Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools '
+    _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$')
+
+ # Example: ' platform-tools | 27.0.0 | 35.0.2'
+    _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$')
+
+    _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P<package>\S+)\'\s*$')
+ _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of '
+ r'the packages they depend on were not accepted')
+
+ def __init__(self, module):
+ self.runner = sdkmanager_runner(module)
+
+ def get_installed_packages(self):
+ with self.runner('installed sdk_root channel') as ctx:
+ rc, stdout, stderr = ctx.run()
+ return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE)
+
+ def get_updatable_packages(self):
+ with self.runner('list newer sdk_root channel') as ctx:
+ rc, stdout, stderr = ctx.run()
+ return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE)
+
+ def apply_packages_changes(self, packages, accept_licenses=False):
+ """ Install or delete packages, depending on the `module.vars.state` parameter """
+ if len(packages) == 0:
+ return 0, '', ''
+
+ if accept_licenses:
+ license_prompt_answer = 'y'
+ else:
+ license_prompt_answer = 'N'
+ for package in packages:
+ with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx:
+ rc, stdout, stderr = ctx.run(name=package.name)
+
+ for line in stdout.splitlines():
+ if self._RE_ACCEPT_LICENSE.match(line):
+ raise SdkManagerException("Licenses for some packages were not accepted")
+
+ if rc != 0:
+ self._try_parse_stderr(stderr)
+ return rc, stdout, stderr
+ return 0, '', ''
+
+ def _try_parse_stderr(self, stderr):
+ data = stderr.splitlines()
+ for line in data:
+ unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line)
+ if unknown_package_regex:
+ package = unknown_package_regex.group('package')
+ raise SdkManagerException("Unknown package %s" % package)
+
+ @staticmethod
+ def _parse_packages(stdout, header_regexp, row_regexp):
+ data = stdout.splitlines()
+
+ section_found = False
+ packages = set()
+
+ for line in data:
+ if not section_found:
+ section_found = header_regexp.match(line)
+ continue
+ else:
+ p = row_regexp.match(line)
+ if p:
+ packages.add(Package(p.group('name')))
+ return packages
diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py
index f9d6e98056..10278964bb 100644
--- a/plugins/module_utils/cmd_runner.py
+++ b/plugins/module_utils/cmd_runner.py
@@ -7,11 +7,10 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
-from functools import wraps
from ansible.module_utils.common.collections import is_sequence
-from ansible.module_utils.six import iteritems
from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt
def _ensure_list(value):
@@ -89,129 +88,6 @@ class FormatError(CmdRunnerException):
)
-class _ArgFormat(object):
- # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0
- def __init__(self, func, ignore_none=None, ignore_missing_value=False):
- self.func = func
- self.ignore_none = ignore_none
- self.ignore_missing_value = ignore_missing_value
-
- # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0
- def __call__(self, value, ctx_ignore_none=True):
- # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0
- ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none
- if value is None and ignore_none:
- return []
- f = self.func
- return [str(x) for x in f(value)]
-
- def __str__(self):
-        return "<ArgFormat: func={0}, ignore_none={1}, ignore_missing_value={2}>".format(
- self.func,
- self.ignore_none,
- self.ignore_missing_value,
- )
-
- def __repr__(self):
- return str(self)
-
-
-class _Format(object):
- @staticmethod
- def as_bool(args_true, args_false=None, ignore_none=None):
- if args_false is not None:
- if ignore_none is None:
- ignore_none = False
- else:
- args_false = []
- return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
-
- @staticmethod
- def as_bool_not(args):
- return _Format.as_bool([], args, ignore_none=False)
-
- @staticmethod
- def as_optval(arg, ignore_none=None):
- return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none)
-
- @staticmethod
- def as_opt_val(arg, ignore_none=None):
- return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none)
-
- @staticmethod
- def as_opt_eq_val(arg, ignore_none=None):
- return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none)
-
- @staticmethod
- def as_list(ignore_none=None, min_len=0, max_len=None):
- def func(value):
- value = _ensure_list(value)
- if len(value) < min_len:
- raise ValueError("Parameter must have at least {0} element(s)".format(min_len))
- if max_len is not None and len(value) > max_len:
- raise ValueError("Parameter must have at most {0} element(s)".format(max_len))
- return value
- return _ArgFormat(func, ignore_none=ignore_none)
-
- @staticmethod
- def as_fixed(args):
- return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
-
- @staticmethod
- def as_func(func, ignore_none=None):
- return _ArgFormat(func, ignore_none=ignore_none)
-
- @staticmethod
- def as_map(_map, default=None, ignore_none=None):
- if default is None:
- default = []
- return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none)
-
- @staticmethod
- def as_default_type(_type, arg="", ignore_none=None):
- #
- # DEPRECATION: This method is deprecated and will be removed in community.general 10.0.0
- #
- # Instead of using the implicit formats provided here, use the explicit necessary format method.
- #
- fmt = _Format
- if _type == "dict":
- return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none)
- if _type == "list":
- return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none)
- if _type == "bool":
- return fmt.as_bool("--{0}".format(arg))
-
- return fmt.as_opt_val("--{0}".format(arg), ignore_none=ignore_none)
-
- @staticmethod
- def unpack_args(func):
- @wraps(func)
- def wrapper(v):
- return func(*v)
- return wrapper
-
- @staticmethod
- def unpack_kwargs(func):
- @wraps(func)
- def wrapper(v):
- return func(**v)
- return wrapper
-
- @staticmethod
- def stack(fmt):
- @wraps(fmt)
- def wrapper(*args, **kwargs):
- new_func = fmt(ignore_none=True, *args, **kwargs)
-
- def stacking(value):
- stack = [new_func(v) for v in value if v]
- stack = [x for args in stack for x in args]
- return stack
- return _ArgFormat(stacking, ignore_none=True)
- return wrapper
-
-
class CmdRunner(object):
"""
Wrapper for ``AnsibleModule.run_command()``.
@@ -233,8 +109,8 @@ class CmdRunner(object):
arg_formats = {}
self.arg_formats = {}
for fmt_name, fmt in arg_formats.items():
- if not isinstance(fmt, _ArgFormat):
- fmt = _Format.as_func(func=fmt, ignore_none=True)
+ if not cmd_runner_fmt.is_argformat(fmt):
+ fmt = cmd_runner_fmt.as_func(func=fmt, ignore_none=True)
self.arg_formats[fmt_name] = fmt
self.check_rc = check_rc
if force_lang == "auto":
@@ -252,10 +128,6 @@ class CmdRunner(object):
_cmd = self.command[0]
self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)
- for mod_param_name, spec in iteritems(module.argument_spec):
- if mod_param_name not in self.arg_formats:
- self.arg_formats[mod_param_name] = _Format.as_default_type(spec.get('type', 'str'), mod_param_name)
-
@property
def binary(self):
return self.command[0]
@@ -372,6 +244,3 @@ class _CmdRunnerContext(object):
def __exit__(self, exc_type, exc_val, exc_tb):
return False
-
-
-cmd_runner_fmt = _Format()
diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py
new file mode 100644
index 0000000000..8b415edcf9
--- /dev/null
+++ b/plugins/module_utils/cmd_runner_fmt.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import wraps
+
+from ansible.module_utils.common.collections import is_sequence
+
+
+def _ensure_list(value):
+ return list(value) if is_sequence(value) else [value]
+
+
+class _ArgFormat(object):
+ # DEPRECATION: set default value for ignore_none to True in community.general 12.0.0
+ def __init__(self, func, ignore_none=None, ignore_missing_value=False):
+ self.func = func
+ self.ignore_none = ignore_none
+ self.ignore_missing_value = ignore_missing_value
+
+ # DEPRECATION: remove parameter ctx_ignore_none in community.general 12.0.0
+ def __call__(self, value, ctx_ignore_none=True):
+ # DEPRECATION: replace ctx_ignore_none with True in community.general 12.0.0
+ ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none
+ if value is None and ignore_none:
+ return []
+ f = self.func
+ return [str(x) for x in f(value)]
+
+ def __str__(self):
+ return "<ArgFormat (func={0}, ignore_none={1}, ignore_missing_value={2})>".format(
+ self.func,
+ self.ignore_none,
+ self.ignore_missing_value,
+ )
+
+ def __repr__(self):
+ return str(self)
+
+
+def as_bool(args_true, args_false=None, ignore_none=None):
+ if args_false is not None:
+ if ignore_none is None:
+ ignore_none = False
+ else:
+ args_false = []
+ return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
+
+
+def as_bool_not(args):
+ return as_bool([], args, ignore_none=False)
+
+
+def as_optval(arg, ignore_none=None):
+ return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none)
+
+
+def as_opt_val(arg, ignore_none=None):
+ return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none)
+
+
+def as_opt_eq_val(arg, ignore_none=None):
+ return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none)
+
+
+def as_list(ignore_none=None, min_len=0, max_len=None):
+ def func(value):
+ value = _ensure_list(value)
+ if len(value) < min_len:
+ raise ValueError("Parameter must have at least {0} element(s)".format(min_len))
+ if max_len is not None and len(value) > max_len:
+ raise ValueError("Parameter must have at most {0} element(s)".format(max_len))
+ return value
+ return _ArgFormat(func, ignore_none=ignore_none)
+
+
+def as_fixed(*args):
+ if len(args) == 1 and is_sequence(args[0]):
+ args = args[0]
+ return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
+
+
+def as_func(func, ignore_none=None):
+ return _ArgFormat(func, ignore_none=ignore_none)
+
+
+def as_map(_map, default=None, ignore_none=None):
+ if default is None:
+ default = []
+ return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none)
+
+
+def unpack_args(func):
+ @wraps(func)
+ def wrapper(v):
+ return func(*v)
+ return wrapper
+
+
+def unpack_kwargs(func):
+ @wraps(func)
+ def wrapper(v):
+ return func(**v)
+ return wrapper
+
+
+def stack(fmt):
+ @wraps(fmt)
+ def wrapper(*args, **kwargs):
+ new_func = fmt(ignore_none=True, *args, **kwargs)
+
+ def stacking(value):
+ stack = [new_func(v) for v in value if v]
+ stack = [x for args in stack for x in args]
+ return stack
+ return _ArgFormat(stacking, ignore_none=True)
+ return wrapper
+
+
+def is_argformat(fmt):
+ return isinstance(fmt, _ArgFormat)
diff --git a/plugins/module_utils/deps.py b/plugins/module_utils/deps.py
index a2413d1952..66847ccd25 100644
--- a/plugins/module_utils/deps.py
+++ b/plugins/module_utils/deps.py
@@ -96,3 +96,7 @@ def validate(module, spec=None):
def failed(spec=None):
return any(_deps[d].failed for d in _select_names(spec))
+
+
+def clear():
+ _deps.clear()
diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py
index 5fb375c6fd..4f5293c09f 100644
--- a/plugins/module_utils/django.py
+++ b/plugins/module_utils/django.py
@@ -32,6 +32,7 @@ _django_std_arg_fmts = dict(
verbosity=cmd_runner_fmt.as_opt_val("--verbosity"),
no_color=cmd_runner_fmt.as_fixed("--no-color"),
skip_checks=cmd_runner_fmt.as_bool("--skip-checks"),
+ version=cmd_runner_fmt.as_fixed("--version"),
)
_django_database_args = dict(
@@ -60,14 +61,15 @@ class _DjangoRunner(PythonRunner):
)
return super(_DjangoRunner, self).__call__(args_order, output_process, ignore_value_none, check_mode_skip, check_mode_return, **kwargs)
+ def bare_context(self, *args, **kwargs):
+ return super(_DjangoRunner, self).__call__(*args, **kwargs)
+
class DjangoModuleHelper(ModuleHelper):
module = {}
- use_old_vardict = False
django_admin_cmd = None
arg_formats = {}
django_admin_arg_order = ()
- use_old_vardict = False
_django_args = []
_check_mode_arg = ""
@@ -98,16 +100,20 @@ class DjangoModuleHelper(ModuleHelper):
arg_formats=self.arg_formats,
venv=self.vars.venv,
check_rc=True)
+
+ run_params = self.vars.as_dict()
+ if self._check_mode_arg:
+ run_params.update({self._check_mode_arg: self.check_mode})
+
+ rc, out, err = runner.bare_context("version").run()
+ self.vars.version = out.strip()
+
with runner() as ctx:
- run_params = self.vars.as_dict()
- if self._check_mode_arg:
- run_params.update({self._check_mode_arg: self.check_mode})
results = ctx.run(**run_params)
self.vars.stdout = ctx.results_out
self.vars.stderr = ctx.results_err
self.vars.cmd = ctx.cmd
- if self.verbosity >= 3:
- self.vars.run_info = ctx.run_info
+ self.vars.set("run_info", ctx.run_info, verbosity=3)
return results
diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py
index e90c3fb2cb..8e04f9ee3f 100644
--- a/plugins/module_utils/gconftool2.py
+++ b/plugins/module_utils/gconftool2.py
@@ -27,6 +27,7 @@ def gconftool2_runner(module, **kwargs):
value=cmd_runner_fmt.as_list(),
direct=cmd_runner_fmt.as_bool("--direct"),
config_source=cmd_runner_fmt.as_opt_val("--config-source"),
+ version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
)
diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py
index e01709487d..c734e13a81 100644
--- a/plugins/module_utils/gio_mime.py
+++ b/plugins/module_utils/gio_mime.py
@@ -12,10 +12,12 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
def gio_mime_runner(module, **kwargs):
return CmdRunner(
module,
- command=['gio', 'mime'],
+ command=['gio'],
arg_formats=dict(
+ mime=cmd_runner_fmt.as_fixed('mime'),
mime_type=cmd_runner_fmt.as_list(),
handler=cmd_runner_fmt.as_list(),
+ version=cmd_runner_fmt.as_fixed('--version'),
),
**kwargs
)
@@ -28,5 +30,5 @@ def gio_mime_get(runner, mime_type):
out = out.splitlines()[0]
return out.split()[-1]
- with runner("mime_type", output_process=process) as ctx:
+ with runner("mime mime_type", output_process=process) as ctx:
return ctx.run(mime_type=mime_type)
diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py
index 224789a71e..3c0014cfe9 100644
--- a/plugins/module_utils/gitlab.py
+++ b/plugins/module_utils/gitlab.py
@@ -111,29 +111,16 @@ def gitlab_authentication(module, min_version=None):
verify = ca_path if validate_certs and ca_path else validate_certs
try:
- # python-gitlab library remove support for username/password authentication since 1.13.0
- # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
- # This condition allow to still support older version of the python-gitlab library
- if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
- module.deprecate(
- "GitLab basic auth is deprecated and will be removed in next major version, "
- "using another auth method (API token or OAuth) is strongly recommended.",
- version='10.0.0',
- collection_name='community.general')
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, email=gitlab_user, password=gitlab_password,
- private_token=gitlab_token, api_version=4)
- else:
- # We can create an oauth_token using a username and password
- # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
- if gitlab_user:
- data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
- resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify)
- resp_data = resp.json()
- gitlab_oauth_token = resp_data["access_token"]
-
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token,
- oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
+ # We can create an oauth_token using a username and password
+ # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
+ if gitlab_user:
+ data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
+ resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify)
+ resp_data = resp.json()
+ gitlab_oauth_token = resp_data["access_token"]
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token,
+ oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py
index 02465a70fa..e053eca305 100644
--- a/plugins/module_utils/identity/keycloak/keycloak.py
+++ b/plugins/module_utils/identity/keycloak/keycloak.py
@@ -142,6 +142,7 @@ def keycloak_argument_spec():
validate_certs=dict(type='bool', default=True),
connection_timeout=dict(type='int', default=10),
token=dict(type='str', no_log=True),
+ refresh_token=dict(type='str', no_log=True),
http_agent=dict(type='str', default='Ansible'),
)
@@ -151,58 +152,142 @@ def camel(words):
class KeycloakError(Exception):
- pass
+ def __init__(self, msg, authError=None):
+ self.msg = msg
+ self.authError = authError
+
+ def __str__(self):
+ return str(self.msg)
+
+
+def _token_request(module_params, payload):
+ """ Obtains an access token from the Keycloak token endpoint,
+ using the provided authentication payload
+ :param module_params: parameters of the module
+ :param payload:
+ type:
+ dict
+ description:
+ Authentication request payload. Must contain at least
+ 'grant_type' and 'client_id', optionally 'client_secret',
+ along with parameters based on 'grant_type'; e.g.,
+ 'username'/'password' for type 'password',
+ 'refresh_token' for type 'refresh_token'.
+ :return: access token
+ """
+ base_url = module_params.get('auth_keycloak_url')
+ if not base_url.lower().startswith(('http', 'https')):
+ raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)
+ auth_realm = module_params.get('auth_realm')
+ auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
+ http_agent = module_params.get('http_agent')
+ validate_certs = module_params.get('validate_certs')
+ connection_timeout = module_params.get('connection_timeout')
+
+ try:
+ r = json.loads(to_native(open_url(auth_url, method='POST',
+ validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout,
+ data=urlencode(payload)).read()))
+
+ return r['access_token']
+ except ValueError as e:
+ raise KeycloakError(
+ 'API returned invalid JSON when trying to obtain access token from %s: %s'
+ % (auth_url, str(e)))
+ except KeyError:
+ raise KeycloakError(
+ 'API did not include access_token field in response from %s' % auth_url)
+ except Exception as e:
+ raise KeycloakError('Could not obtain access token from %s: %s'
+ % (auth_url, str(e)), authError=e)
+
+
+def _request_token_using_credentials(module_params):
+ """ Obtains connection header with token for the authentication,
+ using the provided auth_username/auth_password
+ :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'.
+ :return: access token
+ """
+ client_id = module_params.get('auth_client_id')
+ auth_username = module_params.get('auth_username')
+ auth_password = module_params.get('auth_password')
+ client_secret = module_params.get('auth_client_secret')
+
+ temp_payload = {
+ 'grant_type': 'password',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'username': auth_username,
+ 'password': auth_password,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+ return _token_request(module_params, payload)
+
+
+def _request_token_using_refresh_token(module_params):
+ """ Obtains connection header with token for the authentication,
+ using the provided refresh_token
+ :param module_params: parameters of the module. Must include 'refresh_token'.
+ :return: access token
+ """
+ client_id = module_params.get('auth_client_id')
+ refresh_token = module_params.get('refresh_token')
+ client_secret = module_params.get('auth_client_secret')
+
+ temp_payload = {
+ 'grant_type': 'refresh_token',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'refresh_token': refresh_token,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+ return _token_request(module_params, payload)
+
+
+def _request_token_using_client_credentials(module_params):
+ """ Obtains connection header with token for the authentication,
+ using the provided auth_client_id and auth_client_secret by grant_type
+ client_credentials. Ensure that the used client uses client authorization
+ with service account roles enabled and required service roles assigned.
+ :param module_params: parameters of the module. Must include 'auth_client_id'
+ and 'auth_client_secret'.
+ :return: access token
+ """
+ client_id = module_params.get('auth_client_id')
+ client_secret = module_params.get('auth_client_secret')
+
+ temp_payload = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+ return _token_request(module_params, payload)
def get_token(module_params):
""" Obtains connection header with token for the authentication,
- token already given or obtained from credentials
- :param module_params: parameters of the module
- :return: connection header
+ token already given or obtained from credentials
+ :param module_params: parameters of the module
+ :return: connection header
"""
token = module_params.get('token')
- base_url = module_params.get('auth_keycloak_url')
- http_agent = module_params.get('http_agent')
-
- if not base_url.lower().startswith(('http', 'https')):
- raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)
if token is None:
- base_url = module_params.get('auth_keycloak_url')
- validate_certs = module_params.get('validate_certs')
- auth_realm = module_params.get('auth_realm')
- client_id = module_params.get('auth_client_id')
+ auth_client_id = module_params.get('auth_client_id')
+ auth_client_secret = module_params.get('auth_client_secret')
auth_username = module_params.get('auth_username')
- auth_password = module_params.get('auth_password')
- client_secret = module_params.get('auth_client_secret')
- connection_timeout = module_params.get('connection_timeout')
- auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
- temp_payload = {
- 'grant_type': 'password',
- 'client_id': client_id,
- 'client_secret': client_secret,
- 'username': auth_username,
- 'password': auth_password,
- }
- # Remove empty items, for instance missing client_secret
- payload = {k: v for k, v in temp_payload.items() if v is not None}
- try:
- r = json.loads(to_native(open_url(auth_url, method='POST',
- validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout,
- data=urlencode(payload)).read()))
- except ValueError as e:
- raise KeycloakError(
- 'API returned invalid JSON when trying to obtain access token from %s: %s'
- % (auth_url, str(e)))
- except Exception as e:
- raise KeycloakError('Could not obtain access token from %s: %s'
- % (auth_url, str(e)))
+ if auth_client_id is not None and auth_client_secret is not None and auth_username is None:
+ token = _request_token_using_client_credentials(module_params)
+ else:
+ token = _request_token_using_credentials(module_params)
- try:
- token = r['access_token']
- except KeyError:
- raise KeycloakError(
- 'Could not obtain access token from %s' % auth_url)
return {
'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json'
@@ -272,6 +357,7 @@ class KeycloakAPI(object):
""" Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
is obtained through OpenID connect
"""
+
def __init__(self, module, connection_header):
self.module = module
self.baseurl = self.module.params.get('auth_keycloak_url')
@@ -280,6 +366,87 @@ class KeycloakAPI(object):
self.restheaders = connection_header
self.http_agent = self.module.params.get('http_agent')
+ def _request(self, url, method, data=None):
+ """ Makes a request to Keycloak and returns the raw response.
+ If a 401 is returned, attempts to re-authenticate
+ using first the module's refresh_token (if provided)
+ and then the module's username/password (if provided).
+ On successful re-authentication, the new token is stored
+ in the restheaders for future requests.
+
+ :param url: request path
+ :param method: request method (e.g., 'GET', 'POST', etc.)
+ :param data: (optional) data for request
+ :return: raw API response
+ """
+ def make_request_catching_401():
+ try:
+ return open_url(url, method=method, data=data,
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except HTTPError as e:
+ if e.code != 401:
+ raise e
+ return e
+
+ r = make_request_catching_401()
+
+ if isinstance(r, Exception):
+ # Try to refresh token and retry, if available
+ refresh_token = self.module.params.get('refresh_token')
+ if refresh_token is not None:
+ try:
+ token = _request_token_using_refresh_token(self.module.params)
+ self.restheaders['Authorization'] = 'Bearer ' + token
+
+ r = make_request_catching_401()
+ except KeycloakError as e:
+ # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400
+ if e.authError is not None and e.authError.code != 400:
+ raise e
+
+ if isinstance(r, Exception):
+ # Try to re-auth with username/password, if available
+ auth_username = self.module.params.get('auth_username')
+ auth_password = self.module.params.get('auth_password')
+ if auth_username is not None and auth_password is not None:
+ token = _request_token_using_credentials(self.module.params)
+ self.restheaders['Authorization'] = 'Bearer ' + token
+
+ r = make_request_catching_401()
+
+ if isinstance(r, Exception):
+ # Try to re-auth with client_id and client_secret, if available
+ auth_client_id = self.module.params.get('auth_client_id')
+ auth_client_secret = self.module.params.get('auth_client_secret')
+ if auth_client_id is not None and auth_client_secret is not None:
+ try:
+ token = _request_token_using_client_credentials(self.module.params)
+ self.restheaders['Authorization'] = 'Bearer ' + token
+
+ r = make_request_catching_401()
+ except KeycloakError as e:
+ # Client-credentials token request returns 400 if the credentials are invalid, so continue on if we get a 400
+ if e.authError is not None and e.authError.code != 400:
+ raise e
+
+ if isinstance(r, Exception):
+ # Either no re-auth options were available, or they all failed
+ raise r
+
+ return r
+
+ def _request_and_deserialize(self, url, method, data=None):
+ """ Wraps the _request method with JSON deserialization of the response.
+
+ :param url: request path
+ :param method: request method (e.g., 'GET', 'POST', etc.)
+ :param data: (optional) data for request
+ :return: deserialized JSON body of the API response
+ """
+ return json.loads(to_native(self._request(url, method, data).read()))
+
def get_realm_info_by_id(self, realm='master'):
""" Obtain realm public info by id
@@ -289,16 +456,14 @@ class KeycloakAPI(object):
realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(realm_info_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(realm_info_url, method='GET')
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
@@ -320,16 +485,14 @@ class KeycloakAPI(object):
realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(realm_keys_metadata_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(realm_keys_metadata_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
@@ -337,6 +500,8 @@ class KeycloakAPI(object):
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
+ # The Keycloak API expects the realm name (like `master`) not the ID when fetching the realm data.
+ # See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin
def get_realm_by_id(self, realm='master'):
""" Obtain realm representation by id
@@ -346,15 +511,14 @@ class KeycloakAPI(object):
realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(realm_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(realm_url, method='GET')
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
@@ -371,11 +535,10 @@ class KeycloakAPI(object):
realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
try:
- return open_url(realm_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(realmrep), validate_certs=self.validate_certs)
+ return self._request(realm_url, method='PUT', data=json.dumps(realmrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not update realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
def create_realm(self, realmrep):
""" Create a realm in keycloak
@@ -385,11 +548,10 @@ class KeycloakAPI(object):
realm_url = URL_REALMS.format(url=self.baseurl)
try:
- return open_url(realm_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(realmrep), validate_certs=self.validate_certs)
+ return self._request(realm_url, method='POST', data=json.dumps(realmrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
+ exception=traceback.format_exc())
def delete_realm(self, realm="master"):
""" Delete a realm from Keycloak
@@ -400,11 +562,10 @@ class KeycloakAPI(object):
realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
try:
- return open_url(realm_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(realm_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_request(e, msg='Could not delete realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
def get_clients(self, realm='master', filter=None):
""" Obtains client representations for clients in a realm
@@ -418,15 +579,13 @@ class KeycloakAPI(object):
clientlist_url += '?clientId=%s' % filter
try:
- return json.loads(to_native(open_url(clientlist_url, http_agent=self.http_agent, method='GET', headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientlist_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of clients for realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
def get_client_by_clientid(self, client_id, realm='master'):
""" Get client representation by clientId
@@ -450,16 +609,14 @@ class KeycloakAPI(object):
client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
try:
- return json.loads(to_native(open_url(client_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_url, method='GET')
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
% (id, realm, str(e)))
@@ -490,11 +647,10 @@ class KeycloakAPI(object):
client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
try:
- return open_url(client_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ return self._request(client_url, method='PUT', data=json.dumps(clientrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update client %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not update client %s in realm %s: %s'
+ % (id, realm, str(e)))
def create_client(self, clientrep, realm="master"):
""" Create a client in keycloak
@@ -505,11 +661,10 @@ class KeycloakAPI(object):
client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
try:
- return open_url(client_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ return self._request(client_url, method='POST', data=json.dumps(clientrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create client %s in realm %s: %s'
- % (clientrep['clientId'], realm, str(e)))
+ self.fail_request(e, msg='Could not create client %s in realm %s: %s'
+ % (clientrep['clientId'], realm, str(e)))
def delete_client(self, id, realm="master"):
""" Delete a client from Keycloak
@@ -521,11 +676,10 @@ class KeycloakAPI(object):
client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
try:
- return open_url(client_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(client_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete client %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete client %s in realm %s: %s'
+ % (id, realm, str(e)))
def get_client_roles_by_id(self, cid, realm="master"):
""" Fetch the roles of the a client on the Keycloak server.
@@ -536,12 +690,10 @@ class KeycloakAPI(object):
"""
client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_roles_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in realm %s: %s"
- % (cid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch rolemappings for client %s in realm %s: %s"
+ % (cid, realm, str(e)))
def get_client_role_id_by_name(self, cid, name, realm="master"):
""" Get the role ID of a client.
@@ -568,15 +720,13 @@ class KeycloakAPI(object):
"""
rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
try:
- rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
for role in rolemappings:
if rid == role['id']:
return role
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
return None
def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
@@ -589,12 +739,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
try:
- return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(available_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
""" Fetch the composite role of a client in a specified group on the Keycloak server.
@@ -606,12 +754,10 @@ class KeycloakAPI(object):
"""
composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
try:
- return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(composite_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch composite rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
def get_role_by_id(self, rid, realm="master"):
""" Fetch a role by its id on the Keycloak server.
@@ -622,12 +768,10 @@ class KeycloakAPI(object):
"""
client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid)
try:
- return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_roles_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch role for id %s in realm %s: %s"
- % (rid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch role for id %s in realm %s: %s"
+ % (rid, realm, str(e)))
def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
""" Fetch a role by its id on the Keycloak server.
@@ -639,12 +783,10 @@ class KeycloakAPI(object):
"""
client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid)
try:
- return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_roles_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s"
- % (rid, cid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s"
+ % (rid, cid, realm, str(e)))
def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
""" Assign roles to composite role
@@ -656,11 +798,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid)
try:
- open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(available_rolemappings_url, method="POST", data=json.dumps(roles_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not assign roles to composite role %s and realm %s: %s"
- % (rid, realm, str(e)))
+ self.fail_request(e, msg="Could not assign roles to composite role %s and realm %s: %s"
+ % (rid, realm, str(e)))
def add_group_realm_rolemapping(self, gid, role_rep, realm="master"):
""" Add the specified realm role to specified group on the Keycloak server.
@@ -672,11 +813,10 @@ class KeycloakAPI(object):
"""
url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
try:
- open_url(url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(url, method="POST", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could add realm role mappings for group %s, realm %s: %s"
- % (gid, realm, str(e)))
+ self.fail_request(e, msg="Could not add realm role mappings for group %s, realm %s: %s"
+ % (gid, realm, str(e)))
def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"):
""" Delete the specified realm role from the specified group on the Keycloak server.
@@ -688,11 +828,10 @@ class KeycloakAPI(object):
"""
url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
try:
- open_url(url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(url, method="DELETE", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
- % (gid, realm, str(e)))
+ self.fail_request(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
+ % (gid, realm, str(e)))
def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
""" Fetch the composite role of a client in a specified group on the Keycloak server.
@@ -705,11 +844,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
try:
- open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
+ self.fail_request(e, msg="Could not add rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
""" Delete the rolemapping of a client in a specified group on the Keycloak server.
@@ -722,11 +860,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
try:
- open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
+ self.fail_request(e, msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
""" Obtain client representation by id
@@ -739,15 +876,13 @@ class KeycloakAPI(object):
"""
rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
try:
- rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
for role in rolemappings:
if rid == role['id']:
return role
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
- % (cid, uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
+ % (cid, uid, realm, str(e)))
return None
def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
@@ -760,12 +895,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
try:
- return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(available_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
- % (cid, uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch available rolemappings for client %s and user %s, realm %s: %s"
+ % (cid, uid, realm, str(e)))
def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
""" Fetch the composite role of a client for a specified user on the Keycloak server.
@@ -777,12 +910,10 @@ class KeycloakAPI(object):
"""
composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
try:
- return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(composite_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
- % (uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch composite rolemappings for user %s of realm %s: %s"
+ % (uid, realm, str(e)))
def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
""" Obtain role representation by id
@@ -794,15 +925,13 @@ class KeycloakAPI(object):
"""
rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
try:
- rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ rolemappings = self._request_and_deserialize(rolemappings_url, method="GET")
for role in rolemappings:
if rid == role['id']:
return role
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
- % (uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
+ % (uid, realm, str(e)))
return None
def get_realm_user_available_rolemappings(self, uid, realm="master"):
@@ -814,12 +943,10 @@ class KeycloakAPI(object):
"""
available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid)
try:
- return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(available_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
- % (uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+ % (uid, realm, str(e)))
def get_realm_user_composite_rolemappings(self, uid, realm="master"):
""" Fetch the composite role of a realm for a specified user on the Keycloak server.
@@ -830,12 +957,10 @@ class KeycloakAPI(object):
"""
composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid)
try:
- return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(composite_rolemappings_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
- % (uid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
+ % (uid, realm, str(e)))
def get_user_by_username(self, username, realm="master"):
""" Fetch a keycloak user within a realm based on its username.
@@ -848,9 +973,7 @@ class KeycloakAPI(object):
users_url += '?username=%s&exact=true' % username
try:
userrep = None
- users = json.loads(to_native(open_url(users_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ users = self._request_and_deserialize(users_url, method='GET')
for user in users:
if user['username'] == username:
userrep = user
@@ -861,8 +984,8 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
% (realm, username, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain the user for realm %s and username %s: %s'
- % (realm, username, str(e)))
+ self.fail_request(e, msg='Could not obtain the user for realm %s and username %s: %s'
+ % (realm, username, str(e)))
def get_service_account_user_by_client_id(self, client_id, realm="master"):
""" Fetch a keycloak service account user within a realm based on its client_id.
@@ -875,15 +998,13 @@ class KeycloakAPI(object):
service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(service_account_user_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(service_account_user_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
% (realm, client_id, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
- % (realm, client_id, str(e)))
+ self.fail_request(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
+ % (realm, client_id, str(e)))
def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
""" Assign a realm or client role to a specified user on the Keycloak server.
@@ -897,19 +1018,17 @@ class KeycloakAPI(object):
if cid is None:
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
try:
- open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s"
- % (uid, realm, json.dumps(role_rep), str(e)))
+ self.fail_request(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s"
+ % (uid, realm, json.dumps(role_rep), str(e)))
else:
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
try:
- open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
- % (cid, uid, realm, json.dumps(role_rep), str(e)))
+ self.fail_request(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
+ % (uid, cid, realm, json.dumps(role_rep), str(e)))
def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
""" Delete the rolemapping of a client in a specified user on the Keycloak server.
@@ -923,19 +1042,17 @@ class KeycloakAPI(object):
if cid is None:
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
try:
- open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not remove roles %s from userId %s, realm %s: %s"
- % (json.dumps(role_rep), uid, realm, str(e)))
+ self.fail_request(e, msg="Could not remove roles %s from userId %s, realm %s: %s"
+ % (json.dumps(role_rep), uid, realm, str(e)))
else:
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
try:
- open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
- % (json.dumps(role_rep), cid, uid, realm, str(e)))
+ self.fail_request(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
+ % (json.dumps(role_rep), cid, uid, realm, str(e)))
def get_client_templates(self, realm='master'):
""" Obtains client template representations for client templates in a realm
@@ -946,14 +1063,13 @@ class KeycloakAPI(object):
url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of client templates for realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
def get_client_template_by_id(self, id, realm='master'):
""" Obtain client template representation by id
@@ -965,14 +1081,13 @@ class KeycloakAPI(object):
url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
try:
- return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
% (id, realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain client template %s for realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain client template %s for realm %s: %s'
+ % (id, realm, str(e)))
def get_client_template_by_name(self, name, realm='master'):
""" Obtain client template representation by name
@@ -1011,11 +1126,10 @@ class KeycloakAPI(object):
url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
try:
- return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ return self._request(url, method='PUT', data=json.dumps(clienttrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update client template %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not update client template %s in realm %s: %s'
+ % (id, realm, str(e)))
def create_client_template(self, clienttrep, realm="master"):
""" Create a client in keycloak
@@ -1026,11 +1140,10 @@ class KeycloakAPI(object):
url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
try:
- return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ return self._request(url, method='POST', data=json.dumps(clienttrep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create client template %s in realm %s: %s'
- % (clienttrep['clientId'], realm, str(e)))
+ self.fail_request(e, msg='Could not create client template %s in realm %s: %s'
+ % (clienttrep['clientId'], realm, str(e)))
def delete_client_template(self, id, realm="master"):
""" Delete a client template from Keycloak
@@ -1042,11 +1155,10 @@ class KeycloakAPI(object):
url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
try:
- return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete client template %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete client template %s in realm %s: %s'
+ % (id, realm, str(e)))
def get_clientscopes(self, realm="master"):
""" Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1059,12 +1171,10 @@ class KeycloakAPI(object):
"""
clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientscopes_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch list of clientscopes in realm %s: %s"
- % (realm, str(e)))
+ self.fail_request(e, msg="Could not fetch list of clientscopes in realm %s: %s"
+ % (realm, str(e)))
def get_clientscope_by_clientscopeid(self, cid, realm="master"):
""" Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
@@ -1077,16 +1187,14 @@ class KeycloakAPI(object):
"""
clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(clientscope_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientscope_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg="Could not fetch clientscope %s in realm %s: %s"
- % (cid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch clientscope %s in realm %s: %s"
+ % (cid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s"
% (cid, realm, str(e)))
@@ -1123,11 +1231,10 @@ class KeycloakAPI(object):
"""
clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
try:
- return open_url(clientscopes_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+ return self._request(clientscopes_url, method='POST', data=json.dumps(clientscoperep))
except Exception as e:
- self.fail_open_url(e, msg="Could not create clientscope %s in realm %s: %s"
- % (clientscoperep['name'], realm, str(e)))
+ self.fail_request(e, msg="Could not create clientscope %s in realm %s: %s"
+ % (clientscoperep['name'], realm, str(e)))
def update_clientscope(self, clientscoperep, realm="master"):
""" Update an existing clientscope.
@@ -1138,12 +1245,11 @@ class KeycloakAPI(object):
clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
try:
- return open_url(clientscope_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+ return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update clientscope %s in realm %s: %s'
- % (clientscoperep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not update clientscope %s in realm %s: %s'
+ % (clientscoperep['name'], realm, str(e)))
def delete_clientscope(self, name=None, cid=None, realm="master"):
""" Delete a clientscope. One of name or cid must be provided.
@@ -1160,8 +1266,8 @@ class KeycloakAPI(object):
# prefer an exception since this is almost certainly a programming error in the module itself.
raise Exception("Unable to delete group - one of group ID or name must be provided.")
- # only lookup the name if cid isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
+ # only lookup the name if cid is not provided.
+ # in the case that both are provided, prefer the ID, since it is one
# less lookup.
if cid is None and name is not None:
for clientscope in self.get_clientscopes(realm=realm):
@@ -1176,11 +1282,10 @@ class KeycloakAPI(object):
# should have a good cid by here.
clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl)
try:
- return open_url(clientscope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(clientscope_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
+ self.fail_request(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
def get_clientscope_protocolmappers(self, cid, realm="master"):
""" Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1194,12 +1299,10 @@ class KeycloakAPI(object):
"""
protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(protocolmappers_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(protocolmappers_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
- % (realm, str(e)))
+ self.fail_request(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
+ % (realm, str(e)))
def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
""" Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
@@ -1214,16 +1317,14 @@ class KeycloakAPI(object):
"""
protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid)
try:
- return json.loads(to_native(open_url(protocolmapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(protocolmapper_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
- % (pid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
+ % (pid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
% (cid, realm, str(e)))
@@ -1262,11 +1363,10 @@ class KeycloakAPI(object):
"""
protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
try:
- return open_url(protocolmappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+ return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep))
except Exception as e:
- self.fail_open_url(e, msg="Could not create protocolmapper %s in realm %s: %s"
- % (mapper_rep['name'], realm, str(e)))
+ self.fail_request(e, msg="Could not create protocolmapper %s in realm %s: %s"
+ % (mapper_rep['name'], realm, str(e)))
def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
""" Update an existing clientscope.
@@ -1278,12 +1378,11 @@ class KeycloakAPI(object):
protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])
try:
- return open_url(protocolmapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+ return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
- % (mapper_rep, realm, str(e)))
+ self.fail_request(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
+ % (mapper_rep, realm, str(e)))
def get_default_clientscopes(self, realm, client_id=None):
"""Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1326,18 +1425,16 @@ class KeycloakAPI(object):
if client_id is None:
clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientscopes_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
else:
cid = self.get_client_id(client_id=client_id, realm=realm)
clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
try:
- return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientscopes_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))
+ self.fail_request(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))
def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
"""Decides which url to use.
@@ -1403,12 +1500,11 @@ class KeycloakAPI(object):
clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
try:
method = 'PUT' if action == "add" else 'DELETE'
- return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(clientscope_type_url, method=method)
except Exception as e:
place = 'realm' if client_id is None else 'client ' + client_id
- self.fail_open_url(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
+ self.fail_request(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
def create_clientsecret(self, id, realm="master"):
""" Generate a new client secret by id
@@ -1420,16 +1516,14 @@ class KeycloakAPI(object):
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
try:
- return json.loads(to_native(open_url(clientsecret_url, method='POST', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientsecret_url, method='POST')
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
% (id, realm, str(e)))
@@ -1444,16 +1538,14 @@ class KeycloakAPI(object):
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
try:
- return json.loads(to_native(open_url(clientsecret_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(clientsecret_url, method='GET')
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
% (id, realm, str(e)))
@@ -1468,12 +1560,10 @@ class KeycloakAPI(object):
"""
groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(groups_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg="Could not fetch list of groups in realm %s: %s"
- % (realm, str(e)))
+ self.fail_request(e, msg="Could not fetch list of groups in realm %s: %s"
+ % (realm, str(e)))
def get_group_by_groupid(self, gid, realm="master"):
""" Fetch a keycloak group from the provided realm using the group's unique ID.
@@ -1486,15 +1576,13 @@ class KeycloakAPI(object):
"""
groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
try:
- return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(groups_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg="Could not fetch group %s in realm %s: %s"
- % (gid, realm, str(e)))
+ self.fail_request(e, msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
% (gid, realm, str(e)))
@@ -1507,10 +1595,8 @@ class KeycloakAPI(object):
if parent['subGroupCount'] == 0:
group_children = []
else:
- group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id'])
- group_children = json.loads(to_native(open_url(group_children_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + "?max=" + str(parent['subGroupCount'])
+ group_children = self._request_and_deserialize(group_children_url, method="GET")
subgroups = group_children
else:
subgroups = parent['subGroups']
@@ -1528,7 +1614,6 @@ class KeycloakAPI(object):
:param realm: Realm in which the group resides; default 'master'
:param parents: Optional list of parents when group to look for is a subgroup
"""
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
try:
if parents:
parent = self.get_subgroup_direct_parent(parents, realm)
@@ -1654,11 +1739,10 @@ class KeycloakAPI(object):
"""
groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
try:
- return open_url(groups_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ return self._request(groups_url, method='POST', data=json.dumps(grouprep))
except Exception as e:
- self.fail_open_url(e, msg="Could not create group %s in realm %s: %s"
- % (grouprep['name'], realm, str(e)))
+ self.fail_request(e, msg="Could not create group %s in realm %s: %s"
+ % (grouprep['name'], realm, str(e)))
def create_subgroup(self, parents, grouprep, realm="master"):
""" Create a Keycloak subgroup.
@@ -1682,11 +1766,10 @@ class KeycloakAPI(object):
parent_id = parent_id["id"]
url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id)
- return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ return self._request(url, method='POST', data=json.dumps(grouprep))
except Exception as e:
- self.fail_open_url(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s"
- % (grouprep['name'], parent_id, realm, str(e)))
+ self.fail_request(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s"
+ % (grouprep['name'], parent_id, realm, str(e)))
def update_group(self, grouprep, realm="master"):
""" Update an existing group.
@@ -1697,11 +1780,10 @@ class KeycloakAPI(object):
group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
try:
- return open_url(group_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ return self._request(group_url, method='PUT', data=json.dumps(grouprep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update group %s in realm %s: %s'
- % (grouprep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not update group %s in realm %s: %s'
+ % (grouprep['name'], realm, str(e)))
def delete_group(self, name=None, groupid=None, realm="master"):
""" Delete a group. One of name or groupid must be provided.
@@ -1719,7 +1801,7 @@ class KeycloakAPI(object):
raise Exception("Unable to delete group - one of group ID or name must be provided.")
# only lookup the name if groupid isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
+ # in the case that both are provided, prefer the ID, since it is one
# less lookup.
if groupid is None and name is not None:
for group in self.get_groups(realm=realm):
@@ -1734,10 +1816,9 @@ class KeycloakAPI(object):
# should have a good groupid by here.
group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
try:
- return open_url(group_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(group_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg="Unable to delete group %s: %s" % (groupid, str(e)))
+ self.fail_request(e, msg="Unable to delete group %s: %s" % (groupid, str(e)))
def get_realm_roles(self, realm='master'):
""" Obtains role representations for roles in a realm
@@ -1747,15 +1828,13 @@ class KeycloakAPI(object):
"""
rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(rolelist_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of roles for realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of roles for realm %s: %s'
+ % (realm, str(e)))
def get_realm_role(self, name, realm='master'):
""" Fetch a keycloak role from the provided realm using the role's name.
@@ -1766,14 +1845,13 @@ class KeycloakAPI(object):
"""
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
try:
- return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(role_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not fetch role %s in realm %s: %s'
- % (name, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch role %s in realm %s: %s'
+ % (name, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
% (name, realm, str(e)))
@@ -1789,11 +1867,10 @@ class KeycloakAPI(object):
if "composites" in rolerep:
keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"])
rolerep["composites"] = keycloak_compatible_composites
- return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ return self._request(roles_url, method='POST', data=json.dumps(rolerep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create role %s in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not create role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
def update_realm_role(self, rolerep, realm='master'):
""" Update an existing realm role.
@@ -1807,14 +1884,13 @@ class KeycloakAPI(object):
if "composites" in rolerep:
composites = copy.deepcopy(rolerep["composites"])
del rolerep["composites"]
- role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
if composites is not None:
self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm)
return role_response
except Exception as e:
- self.fail_open_url(e, msg='Could not update role %s in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not update role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
def get_role_composites(self, rolerep, clientid=None, realm='master'):
composite_url = ''
@@ -1826,16 +1902,10 @@ class KeycloakAPI(object):
else:
composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
# Get existing composites
- return json.loads(to_native(open_url(
- composite_url,
- method='GET',
- http_agent=self.http_agent,
- headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(composite_url, method='GET')
except Exception as e:
- self.fail_open_url(e, msg='Could not get role %s composites in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not get role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
def create_role_composites(self, rolerep, composites, clientid=None, realm='master'):
composite_url = ''
@@ -1848,11 +1918,10 @@ class KeycloakAPI(object):
composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
# Get existing composites
# create new composites
- return open_url(composite_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(composites), validate_certs=self.validate_certs)
+ return self._request(composite_url, method='POST', data=json.dumps(composites))
except Exception as e:
- self.fail_open_url(e, msg='Could not create role %s composites in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not create role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
composite_url = ''
@@ -1865,11 +1934,10 @@ class KeycloakAPI(object):
composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
# Get existing composites
# create new composites
- return open_url(composite_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(composites), validate_certs=self.validate_certs)
+ return self._request(composite_url, method='DELETE', data=json.dumps(composites))
except Exception as e:
- self.fail_open_url(e, msg='Could not create role %s composites in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
+ self.fail_request(e, msg='Could not create role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
def update_role_composites(self, rolerep, composites, clientid=None, realm='master'):
# Get existing composites
@@ -1893,7 +1961,7 @@ class KeycloakAPI(object):
and composite["name"] == existing_composite["name"]):
composite_found = True
break
- if (not composite_found and ('state' not in composite or composite['state'] == 'present')):
+ if not composite_found and ('state' not in composite or composite['state'] == 'present'):
if "client_id" in composite and composite['client_id'] is not None:
client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
for client_role in client_roles:
@@ -1929,11 +1997,10 @@ class KeycloakAPI(object):
"""
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
try:
- return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(role_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Unable to delete role %s in realm %s: %s'
- % (name, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete role %s in realm %s: %s'
+ % (name, realm, str(e)))
def get_client_roles(self, clientid, realm='master'):
""" Obtains role representations for client roles in a specific client
@@ -1948,15 +2015,13 @@ class KeycloakAPI(object):
% (clientid, realm))
rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(rolelist_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s'
% (clientid, realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of roles for client %s in realm %s: %s'
- % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of roles for client %s in realm %s: %s'
+ % (clientid, realm, str(e)))
def get_client_role(self, name, clientid, realm='master'):
""" Fetch a keycloak client role from the provided realm using the role's name.
@@ -1973,14 +2038,13 @@ class KeycloakAPI(object):
% (clientid, realm))
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
try:
- return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(role_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not fetch role %s in client %s of realm %s: %s'
- % (name, clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch role %s in client %s of realm %s: %s'
+ % (name, clientid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s'
% (name, clientid, realm, str(e)))
@@ -2002,11 +2066,10 @@ class KeycloakAPI(object):
if "composites" in rolerep:
keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"])
rolerep["composites"] = keycloak_compatible_composites
- return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ return self._request(roles_url, method='POST', data=json.dumps(rolerep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create role %s for client %s in realm %s: %s'
- % (rolerep['name'], clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not create role %s for client %s in realm %s: %s'
+ % (rolerep['name'], clientid, realm, str(e)))
def convert_role_composites(self, composites):
keycloak_compatible_composites = {
@@ -2041,14 +2104,13 @@ class KeycloakAPI(object):
if "composites" in rolerep:
composites = copy.deepcopy(rolerep["composites"])
del rolerep['composites']
- update_role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
if composites is not None:
self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm)
return update_role_response
except Exception as e:
- self.fail_open_url(e, msg='Could not update role %s for client %s in realm %s: %s'
- % (rolerep['name'], clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not update role %s for client %s in realm %s: %s'
+ % (rolerep['name'], clientid, realm, str(e)))
def delete_client_role(self, name, clientid, realm="master"):
""" Delete a role. One of name or roleid must be provided.
@@ -2063,15 +2125,14 @@ class KeycloakAPI(object):
% (clientid, realm))
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
try:
- return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(role_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Unable to delete role %s for client %s in realm %s: %s'
- % (name, clientid, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete role %s for client %s in realm %s: %s'
+ % (name, clientid, realm, str(e)))
def get_authentication_flow_by_alias(self, alias, realm='master'):
"""
- Get an authentication flow by it's alias
+ Get an authentication flow by its alias
:param alias: Alias of the authentication flow to get.
:param realm: Realm.
:return: Authentication flow representation.
@@ -2079,16 +2140,14 @@ class KeycloakAPI(object):
try:
authentication_flow = {}
# Check if the authentication flow exists on the Keycloak serveraders
- authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout, validate_certs=self.validate_certs))
+ authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET'))
for authentication in authentications:
if authentication["alias"] == alias:
authentication_flow = authentication
break
return authentication_flow
except Exception as e:
- self.fail_open_url(e, msg="Unable get authentication flow %s: %s" % (alias, str(e)))
+ self.fail_request(e, msg="Unable get authentication flow %s: %s" % (alias, str(e)))
def delete_authentication_flow_by_id(self, id, realm='master'):
"""
@@ -2100,11 +2159,10 @@ class KeycloakAPI(object):
flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)
try:
- return open_url(flow_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(flow_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete authentication flow %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete authentication flow %s in realm %s: %s'
+ % (id, realm, str(e)))
def copy_auth_flow(self, config, realm='master'):
"""
@@ -2117,31 +2175,25 @@ class KeycloakAPI(object):
new_name = dict(
newName=config["alias"]
)
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOW_COPY.format(
url=self.baseurl,
realm=realm,
copyfrom=quote(config["copyFrom"], safe='')),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(new_name),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(new_name))
flow_list = json.load(
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOWS.format(url=self.baseurl,
realm=realm),
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
+ method='GET'))
for flow in flow_list:
if flow["alias"] == config["alias"]:
return flow
return None
except Exception as e:
- self.fail_open_url(e, msg='Could not copy authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_request(e, msg='Could not copy authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
def create_empty_auth_flow(self, config, realm='master'):
"""
@@ -2157,31 +2209,25 @@ class KeycloakAPI(object):
description=config["description"],
topLevel=True
)
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOWS.format(
url=self.baseurl,
realm=realm),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(new_flow),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(new_flow))
flow_list = json.load(
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOWS.format(
url=self.baseurl,
realm=realm),
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
+ method='GET'))
for flow in flow_list:
if flow["alias"] == config["alias"]:
return flow
return None
except Exception as e:
- self.fail_open_url(e, msg='Could not create empty authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_request(e, msg='Could not create empty authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
def update_authentication_executions(self, flowAlias, updatedExec, realm='master'):
""" Update authentication executions
@@ -2191,19 +2237,16 @@ class KeycloakAPI(object):
:return: HTTPResponse object on success
"""
try:
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
url=self.baseurl,
realm=realm,
flowalias=quote(flowAlias, safe='')),
method='PUT',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(updatedExec),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(updatedExec))
except HTTPError as e:
- self.fail_open_url(e, msg="Unable to update execution '%s': %s: %s %s"
- % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
+ self.fail_request(e, msg="Unable to update execution '%s': %s: %s %s"
+ % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
except Exception as e:
self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))
@@ -2215,18 +2258,15 @@ class KeycloakAPI(object):
:return: HTTPResponse object on success
"""
try:
- open_url(
+ self._request(
URL_AUTHENTICATION_EXECUTION_CONFIG.format(
url=self.baseurl,
realm=realm,
id=executionId),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(authenticationConfig),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(authenticationConfig))
except Exception as e:
- self.fail_open_url(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
+ self.fail_request(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
def delete_authentication_config(self, configId, realm='master'):
""" Delete authenticator config
@@ -2257,18 +2297,15 @@ class KeycloakAPI(object):
newSubFlow["alias"] = subflowName
newSubFlow["provider"] = "registration-page-form"
newSubFlow["type"] = flowType
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
url=self.baseurl,
realm=realm,
flowalias=quote(flowAlias, safe='')),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(newSubFlow),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(newSubFlow))
except Exception as e:
- self.fail_open_url(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
+ self.fail_request(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
def create_execution(self, execution, flowAlias, realm='master'):
""" Create new execution on the flow
@@ -2281,19 +2318,16 @@ class KeycloakAPI(object):
newExec = {}
newExec["provider"] = execution["providerId"]
newExec["requirement"] = execution["requirement"]
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format(
url=self.baseurl,
realm=realm,
flowalias=quote(flowAlias, safe='')),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(newExec),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(newExec))
except HTTPError as e:
- self.fail_open_url(e, msg="Unable to create new execution '%s' %s: %s: %s %s"
- % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
+ self.fail_request(e, msg="Unable to create new execution '%s' %s: %s: %s %s"
+ % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
except Exception as e:
self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))
@@ -2308,28 +2342,22 @@ class KeycloakAPI(object):
try:
if diff > 0:
for i in range(diff):
- open_url(
+ self._request(
URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format(
url=self.baseurl,
realm=realm,
id=executionId),
- method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ method='POST')
elif diff < 0:
for i in range(-diff):
- open_url(
+ self._request(
URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format(
url=self.baseurl,
realm=realm,
id=executionId),
- method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ method='POST')
except Exception as e:
- self.fail_open_url(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
+ self.fail_request(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
def get_executions_representation(self, config, realm='master'):
"""
@@ -2341,33 +2369,27 @@ class KeycloakAPI(object):
try:
# Get executions created
executions = json.load(
- open_url(
+ self._request(
URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
url=self.baseurl,
realm=realm,
flowalias=quote(config["alias"], safe='')),
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
+ method='GET'))
for execution in executions:
if "authenticationConfig" in execution:
execConfigId = execution["authenticationConfig"]
execConfig = json.load(
- open_url(
+ self._request(
URL_AUTHENTICATION_CONFIG.format(
url=self.baseurl,
realm=realm,
id=execConfigId),
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
+ method='GET'))
execution["authenticationConfig"] = execConfig
return executions
except Exception as e:
- self.fail_open_url(e, msg='Could not get executions for authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_request(e, msg='Could not get executions for authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
def get_required_actions(self, realm='master'):
"""
@@ -2378,15 +2400,12 @@ class KeycloakAPI(object):
try:
required_actions = json.load(
- open_url(
+ self._request(
URL_AUTHENTICATION_REQUIRED_ACTIONS.format(
url=self.baseurl,
realm=realm
),
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs
+ method='GET'
)
)
@@ -2408,19 +2427,16 @@ class KeycloakAPI(object):
}
try:
- return open_url(
+ return self._request(
URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format(
url=self.baseurl,
realm=realm
),
method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(data),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs
)
except Exception as e:
- self.fail_open_url(
+ self.fail_request(
e,
msg='Unable to register required action %s in realm %s: %s'
% (rep["name"], realm, str(e))
@@ -2436,20 +2452,17 @@ class KeycloakAPI(object):
"""
try:
- return open_url(
+ return self._request(
URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format(
url=self.baseurl,
alias=quote(alias, safe=''),
realm=realm
),
method='PUT',
- http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(rep),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs
)
except Exception as e:
- self.fail_open_url(
+ self.fail_request(
e,
msg='Unable to update required action %s in realm %s: %s'
% (alias, realm, str(e))
@@ -2464,19 +2477,16 @@ class KeycloakAPI(object):
"""
try:
- return open_url(
+ return self._request(
URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format(
url=self.baseurl,
alias=quote(alias, safe=''),
realm=realm
),
method='DELETE',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs
)
except Exception as e:
- self.fail_open_url(
+ self.fail_request(
e,
msg='Unable to delete required action %s in realm %s: %s'
% (alias, realm, str(e))
@@ -2489,14 +2499,13 @@ class KeycloakAPI(object):
"""
idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
try:
- return json.loads(to_native(open_url(idps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(idps_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of identity providers for realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of identity providers for realm %s: %s'
+ % (realm, str(e)))
def get_identity_provider(self, alias, realm='master'):
""" Fetch identity provider representation from a realm using the idp's alias.
@@ -2506,14 +2515,13 @@ class KeycloakAPI(object):
"""
idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
try:
- return json.loads(to_native(open_url(idp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(idp_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not fetch identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
% (alias, realm, str(e)))
@@ -2526,11 +2534,10 @@ class KeycloakAPI(object):
"""
idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
try:
- return open_url(idps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(idprep), validate_certs=self.validate_certs)
+ return self._request(idps_url, method='POST', data=json.dumps(idprep))
except Exception as e:
- self.fail_open_url(e, msg='Could not create identity provider %s in realm %s: %s'
- % (idprep['alias'], realm, str(e)))
+ self.fail_request(e, msg='Could not create identity provider %s in realm %s: %s'
+ % (idprep['alias'], realm, str(e)))
def update_identity_provider(self, idprep, realm='master'):
""" Update an existing identity provider.
@@ -2540,11 +2547,10 @@ class KeycloakAPI(object):
"""
idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias'])
try:
- return open_url(idp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(idprep), validate_certs=self.validate_certs)
+ return self._request(idp_url, method='PUT', data=json.dumps(idprep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update identity provider %s in realm %s: %s'
- % (idprep['alias'], realm, str(e)))
+ self.fail_request(e, msg='Could not update identity provider %s in realm %s: %s'
+ % (idprep['alias'], realm, str(e)))
def delete_identity_provider(self, alias, realm='master'):
""" Delete an identity provider.
@@ -2553,11 +2559,10 @@ class KeycloakAPI(object):
"""
idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
try:
- return open_url(idp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(idp_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Unable to delete identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))
def get_identity_provider_mappers(self, alias, realm='master'):
""" Fetch representations for identity provider mappers
@@ -2567,15 +2572,13 @@ class KeycloakAPI(object):
"""
mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
try:
- return json.loads(to_native(open_url(mappers_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(mappers_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
% (alias, realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
+ % (alias, realm, str(e)))
def get_identity_provider_mapper(self, mid, alias, realm='master'):
""" Fetch identity provider representation from a realm using the idp's alias.
@@ -2586,15 +2589,13 @@ class KeycloakAPI(object):
"""
mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
try:
- return json.loads(to_native(open_url(mapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(mapper_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
+ % (mid, alias, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
% (mid, alias, realm, str(e)))
@@ -2608,11 +2609,10 @@ class KeycloakAPI(object):
"""
mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
try:
- return open_url(mappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
+ return self._request(mappers_url, method='POST', data=json.dumps(mapper))
except Exception as e:
- self.fail_open_url(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
- % (mapper['name'], alias, realm, str(e)))
+ self.fail_request(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
+ % (mapper['name'], alias, realm, str(e)))
def update_identity_provider_mapper(self, mapper, alias, realm='master'):
""" Update an existing identity provider.
@@ -2623,11 +2623,10 @@ class KeycloakAPI(object):
"""
mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id'])
try:
- return open_url(mapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
+ return self._request(mapper_url, method='PUT', data=json.dumps(mapper))
except Exception as e:
- self.fail_open_url(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s'
- % (mapper['id'], alias, realm, str(e)))
+ self.fail_request(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s'
+ % (mapper['id'], alias, realm, str(e)))
def delete_identity_provider_mapper(self, mid, alias, realm='master'):
""" Delete an identity provider.
@@ -2637,11 +2636,10 @@ class KeycloakAPI(object):
"""
mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
try:
- return open_url(mapper_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(mapper_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
+ % (mid, alias, realm, str(e)))
def get_components(self, filter=None, realm='master'):
""" Fetch representations for components in a realm
@@ -2654,14 +2652,13 @@ class KeycloakAPI(object):
comps_url += '?%s' % filter
try:
- return json.loads(to_native(open_url(comps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(comps_url, method='GET')
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.fail_open_url(e, msg='Could not obtain list of components for realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not obtain list of components for realm %s: %s'
+ % (realm, str(e)))
def get_component(self, cid, realm='master'):
""" Fetch component representation from a realm using its cid.
@@ -2671,14 +2668,13 @@ class KeycloakAPI(object):
"""
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(comp_url, method="GET")
except HTTPError as e:
if e.code == 404:
return None
else:
- self.fail_open_url(e, msg='Could not fetch component %s in realm %s: %s'
- % (cid, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch component %s in realm %s: %s'
+ % (cid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
% (cid, realm, str(e)))
@@ -2691,17 +2687,15 @@ class KeycloakAPI(object):
"""
comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
try:
- resp = open_url(comps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
+ resp = self._request(comps_url, method='POST', data=json.dumps(comprep))
comp_url = resp.getheader('Location')
if comp_url is None:
self.module.fail_json(msg='Could not create component in realm %s: %s'
% (realm, 'unexpected response'))
- return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(comp_url, method="GET")
except Exception as e:
- self.fail_open_url(e, msg='Could not create component in realm %s: %s'
- % (realm, str(e)))
+ self.fail_request(e, msg='Could not create component in realm %s: %s'
+ % (realm, str(e)))
def update_component(self, comprep, realm='master'):
""" Update an existing component.
@@ -2714,11 +2708,10 @@ class KeycloakAPI(object):
self.module.fail_json(msg='Cannot update component without id')
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
try:
- return open_url(comp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
+ return self._request(comp_url, method='PUT', data=json.dumps(comprep))
except Exception as e:
- self.fail_open_url(e, msg='Could not update component %s in realm %s: %s'
- % (cid, realm, str(e)))
+ self.fail_request(e, msg='Could not update component %s in realm %s: %s'
+ % (cid, realm, str(e)))
def delete_component(self, cid, realm='master'):
""" Delete an component.
@@ -2727,20 +2720,17 @@ class KeycloakAPI(object):
"""
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
try:
- return open_url(comp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(comp_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Unable to delete component %s in realm %s: %s'
- % (cid, realm, str(e)))
+ self.fail_request(e, msg='Unable to delete component %s in realm %s: %s'
+ % (cid, realm, str(e)))
def get_authz_authorization_scope_by_name(self, name, client_id, realm):
url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
search_url = "%s/search?name=%s" % (url, quote(name, safe=''))
try:
- return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(search_url, method='GET')
except Exception:
return False
@@ -2749,30 +2739,27 @@ class KeycloakAPI(object):
url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
try:
- return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ return self._request(url, method='POST', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def update_authz_authorization_scope(self, payload, id, client_id, realm):
"""Update an authorization scope for a Keycloak client"""
url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
try:
- return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ return self._request(url, method='PUT', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not create update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not create update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def remove_authz_authorization_scope(self, id, client_id, realm):
"""Remove an authorization scope from a Keycloak client"""
url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
try:
- return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
def get_user_by_id(self, user_id, realm='master'):
"""
@@ -2787,16 +2774,13 @@ class KeycloakAPI(object):
realm=realm,
id=user_id)
userrep = json.load(
- open_url(
+ self._request(
user_url,
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
+ method='GET'))
return userrep
except Exception as e:
- self.fail_open_url(e, msg='Could not get user %s in realm %s: %s'
- % (user_id, realm, str(e)))
+ self.fail_request(e, msg='Could not get user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
def create_user(self, userrep, realm='master'):
"""
@@ -2812,19 +2796,16 @@ class KeycloakAPI(object):
users_url = URL_USERS.format(
url=self.baseurl,
realm=realm)
- open_url(users_url,
- method='POST',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(userrep),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ self._request(users_url,
+ method='POST',
+ data=json.dumps(userrep))
created_user = self.get_user_by_username(
username=userrep['username'],
realm=realm)
return created_user
except Exception as e:
- self.fail_open_url(e, msg='Could not create user %s in realm %s: %s'
- % (userrep['username'], realm, str(e)))
+ self.fail_request(e, msg='Could not create user %s in realm %s: %s'
+ % (userrep['username'], realm, str(e)))
def convert_user_attributes_to_keycloak_dict(self, attributes):
keycloak_user_attributes_dict = {}
@@ -2857,20 +2838,17 @@ class KeycloakAPI(object):
url=self.baseurl,
realm=realm,
id=userrep["id"])
- open_url(
+ self._request(
user_url,
method='PUT',
- http_agent=self.http_agent, headers=self.restheaders,
- data=json.dumps(userrep),
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ data=json.dumps(userrep))
updated_user = self.get_user_by_id(
user_id=userrep['id'],
realm=realm)
return updated_user
except Exception as e:
- self.fail_open_url(e, msg='Could not update user %s in realm %s: %s'
- % (userrep['username'], realm, str(e)))
+ self.fail_request(e, msg='Could not update user %s in realm %s: %s'
+ % (userrep['username'], realm, str(e)))
def delete_user(self, user_id, realm='master'):
"""
@@ -2884,44 +2862,42 @@ class KeycloakAPI(object):
url=self.baseurl,
realm=realm,
id=user_id)
- return open_url(
+ return self._request(
user_url,
- method='DELETE',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete user %s in realm %s: %s'
- % (user_id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
def get_user_groups(self, user_id, realm='master'):
"""
- Get groups for a user.
+ Get the group names for a user.
:param user_id: User ID
:param realm: Realm
- :return: Representation of the client groups.
+ :return: The client group names as a list of strings.
+ """
+ user_groups = self.get_user_group_details(user_id, realm)
+ return [user_group['name'] for user_group in user_groups if 'name' in user_group]
+
+ def get_user_group_details(self, user_id, realm='master'):
+ """
+ Get the group details for a user.
+ :param user_id: User ID
+ :param realm: Realm
+ :return: The client group details as a list of dictionaries.
"""
try:
- groups = []
- user_groups_url = URL_USER_GROUPS.format(
- url=self.baseurl,
- realm=realm,
- id=user_id)
- user_groups = json.load(
- open_url(
- user_groups_url,
- method='GET',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs))
- for user_group in user_groups:
- groups.append(user_group["name"])
- return groups
+ user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id)
+ return self._request_and_deserialize(user_groups_url, method='GET')
except Exception as e:
- self.fail_open_url(e, msg='Could not get groups for user %s in realm %s: %s'
- % (user_id, realm, str(e)))
+ self.fail_request(e, msg='Could not get groups for user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
def add_user_in_group(self, user_id, group_id, realm='master'):
+ """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0."""
+ return self.add_user_to_group(user_id, group_id, realm)
+
+ def add_user_to_group(self, user_id, group_id, realm='master'):
"""
Add a user to a group.
:param user_id: User ID
@@ -2935,15 +2911,12 @@ class KeycloakAPI(object):
realm=realm,
id=user_id,
group_id=group_id)
- return open_url(
+ return self._request(
user_group_url,
- method='PUT',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ method='PUT')
except Exception as e:
- self.fail_open_url(e, msg='Could not add user %s in group %s in realm %s: %s'
- % (user_id, group_id, realm, str(e)))
+ self.fail_request(e, msg='Could not add user %s to group %s in realm %s: %s'
+ % (user_id, group_id, realm, str(e)))
def remove_user_from_group(self, user_id, group_id, realm='master'):
"""
@@ -2959,15 +2932,12 @@ class KeycloakAPI(object):
realm=realm,
id=user_id,
group_id=group_id)
- return open_url(
+ return self._request(
user_group_url,
- method='DELETE',
- http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not remove user %s from group %s in realm %s: %s'
- % (user_id, group_id, realm, str(e)))
+ self.fail_request(e, msg='Could not remove user %s from group %s in realm %s: %s'
+ % (user_id, group_id, realm, str(e)))
def update_user_groups_membership(self, userrep, groups, realm='master'):
"""
@@ -2976,49 +2946,72 @@ class KeycloakAPI(object):
:param realm: Realm
:return: True if group membership has been changed. False Otherwise.
"""
- changed = False
try:
- user_existing_groups = self.get_user_groups(
- user_id=userrep['id'],
- realm=realm)
- groups_to_add_and_remove = self.extract_groups_to_add_to_and_remove_from_user(groups)
- # If group membership need to be changed
- if not is_struct_included(groups_to_add_and_remove['add'], user_existing_groups):
- # Get available groups in the realm
- realm_groups = self.get_groups(realm=realm)
- for realm_group in realm_groups:
- if "name" in realm_group and realm_group["name"] in groups_to_add_and_remove['add']:
- self.add_user_in_group(
- user_id=userrep["id"],
- group_id=realm_group["id"],
- realm=realm)
- changed = True
- elif "name" in realm_group and realm_group['name'] in groups_to_add_and_remove['remove']:
- self.remove_user_from_group(
- user_id=userrep['id'],
- group_id=realm_group['id'],
- realm=realm)
- changed = True
- return changed
+ groups_to_add, groups_to_remove = self.extract_groups_to_add_to_and_remove_from_user(groups)
+ if not groups_to_add and not groups_to_remove:
+ return False
+
+ user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm)
+ user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group]
+ user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group]
+
+ groups_to_add = [group_to_add for group_to_add in groups_to_add
+ if group_to_add not in user_group_names and group_to_add not in user_group_paths]
+ groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove
+ if group_to_remove in user_group_names or group_to_remove in user_group_paths]
+ if not groups_to_add and not groups_to_remove:
+ return False
+
+ for group_to_add in groups_to_add:
+ realm_group = self.find_group_by_path(group_to_add, realm=realm)
+ if realm_group:
+ self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm)
+
+ for group_to_remove in groups_to_remove:
+ realm_group = self.find_group_by_path(group_to_remove, realm=realm)
+ if realm_group:
+ self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm)
+
+ return True
except Exception as e:
self.module.fail_json(msg='Could not update group membership for user %s in realm %s: %s'
- % (userrep['id]'], realm, str(e)))
+ % (userrep['username'], realm, e))
def extract_groups_to_add_to_and_remove_from_user(self, groups):
- groups_extract = {}
groups_to_add = []
groups_to_remove = []
- if isinstance(groups, list) and len(groups) > 0:
+ if isinstance(groups, list):
for group in groups:
group_name = group['name'] if isinstance(group, dict) and 'name' in group else group
- if isinstance(group, dict) and ('state' not in group or group['state'] == 'present'):
- groups_to_add.append(group_name)
- else:
- groups_to_remove.append(group_name)
- groups_extract['add'] = groups_to_add
- groups_extract['remove'] = groups_to_remove
+ if isinstance(group, dict):
+ if 'state' not in group or group['state'] == 'present':
+ groups_to_add.append(group_name)
+ else:
+ groups_to_remove.append(group_name)
+ return groups_to_add, groups_to_remove
- return groups_extract
+ def find_group_by_path(self, target, realm='master'):
+ """
+ Finds a realm group by path, e.g. '/my/group'.
+ The path is formed by prepending a '/' character to `target` unless it's already present.
+ This adds support for finding top level groups by name and subgroups by path.
+ """
+ groups = self.get_groups(realm=realm)
+ path = target if target.startswith('/') else '/' + target
+ for segment in path.split('/'):
+ if not segment:
+ continue
+ abort = True
+ for group in groups:
+ if group['path'] == path:
+ return self.get_group_by_groupid(group['id'], realm=realm)
+ if group['name'] == segment:
+ groups = self.get_subgroups(group, realm=realm)
+ abort = False
+ break
+ if abort:
+ break
+ return None
def convert_user_group_list_of_str_to_list_of_dict(self, groups):
list_of_groups = []
@@ -3035,10 +3028,9 @@ class KeycloakAPI(object):
url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm)
try:
- return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ return self._request(url, method='POST', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def remove_authz_custom_policy(self, policy_id, client_id, realm):
"""Remove a custom policy from a Keycloak client"""
@@ -3046,10 +3038,9 @@ class KeycloakAPI(object):
delete_url = "%s/%s" % (url, policy_id)
try:
- return open_url(delete_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(delete_url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
def get_authz_permission_by_name(self, name, client_id, realm):
"""Get authorization permission by name"""
@@ -3057,9 +3048,7 @@ class KeycloakAPI(object):
search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20'))
try:
- return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(search_url, method='GET')
except Exception:
return False
@@ -3068,30 +3057,27 @@ class KeycloakAPI(object):
url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm)
try:
- return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ return self._request(url, method='POST', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def remove_authz_permission(self, id, client_id, realm):
"""Create an authorization permission for a Keycloak client"""
url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
try:
- return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
+ return self._request(url, method='DELETE')
except Exception as e:
- self.fail_open_url(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
def update_authz_permission(self, payload, permission_type, id, client_id, realm):
"""Update a permission for a Keycloak client"""
url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm)
try:
- return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ return self._request(url, method='PUT', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_request(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def get_authz_resource_by_name(self, name, client_id, realm):
"""Get authorization resource by name"""
@@ -3099,9 +3085,7 @@ class KeycloakAPI(object):
search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20'))
try:
- return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(search_url, method='GET')
except Exception:
return False
@@ -3111,9 +3095,7 @@ class KeycloakAPI(object):
search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20'))
try:
- return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(search_url, method='GET')
except Exception:
return False
@@ -3126,11 +3108,9 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid)
try:
- return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_role_scope_url, method='GET')
except Exception as e:
- self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"):
""" Update and fetch the roles associated with the client's scope on the Keycloak server.
@@ -3142,11 +3122,10 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid)
try:
- open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ self._request(client_role_scope_url, method='POST', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
return self.get_client_role_scope_from_client(clientid, clientscopeid, realm)
@@ -3160,11 +3139,10 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid)
try:
- open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
return self.get_client_role_scope_from_client(clientid, clientscopeid, realm)
@@ -3176,11 +3154,9 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid)
try:
- return json.loads(to_native(open_url(client_role_scope_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
- timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
+ return self._request_and_deserialize(client_role_scope_url, method='GET')
except Exception as e:
- self.fail_open_url(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
def update_client_role_scope_from_realm(self, payload, clientid, realm="master"):
""" Update and fetch the realm roles from the client's scope on the Keycloak server.
@@ -3191,11 +3167,10 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid)
try:
- open_url(client_role_scope_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ self._request(client_role_scope_url, method='POST', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
return self.get_client_role_scope_from_realm(clientid, realm)
@@ -3208,18 +3183,42 @@ class KeycloakAPI(object):
"""
client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid)
try:
- open_url(client_role_scope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(payload), validate_certs=self.validate_certs)
+ self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload))
except Exception as e:
- self.fail_open_url(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
+ self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e)))
return self.get_client_role_scope_from_realm(clientid, realm)
- def fail_open_url(self, e, msg, **kwargs):
+ def fail_request(self, e, msg, **kwargs):
+ """ Triggers a module failure. This should be called
+ when an exception occurs during/after a request.
+ Attempts to parse the exception e as an HTTP error
+ and append it to msg.
+
+ :param e: exception which triggered the failure
+ :param msg: error message to display to the user
+ :param kwargs: additional arguments to pass to module.fail_json
+ :return: None
+ """
try:
if isinstance(e, HTTPError):
msg = "%s: %s" % (msg, to_native(e.read()))
- except Exception as ingore:
+ except Exception:
pass
self.module.fail_json(msg, **kwargs)
+
+ def fail_open_url(self, e, msg, **kwargs):
+ """ DEPRECATED: Use fail_request instead.
+
+ Triggers a module failure. This should be called
+ when an exception occurs during/after a request.
+ Attempts to parse the exception e as an HTTP error
+ and append it to msg.
+
+ :param e: exception which triggered the failure
+ :param msg: error message to display to the user
+ :param kwargs: additional arguments to pass to module.fail_json
+ :return: None
+ """
+ return self.fail_request(e, msg, **kwargs)
diff --git a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
index 85caa8e16b..366322c9df 100644
--- a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
+++ b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
@@ -61,7 +61,7 @@ def keycloak_clientsecret_module_resolve_params(module, kc):
client_id = module.params.get('client_id')
# only lookup the client_id if id isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
+ # in the case that both are provided, prefer the ID, since it is one
# less lookup.
if id is None:
# Due to the required_one_of spec, client_id is guaranteed to not be None
diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py
index 25dd3e174e..9a17355b4e 100644
--- a/plugins/module_utils/known_hosts.py
+++ b/plugins/module_utils/known_hosts.py
@@ -103,13 +103,11 @@ def not_in_host_file(self, host):
continue
try:
- host_fh = open(hf)
+ with open(hf) as host_fh:
+ data = host_fh.read()
except IOError:
hfiles_not_found += 1
continue
- else:
- data = host_fh.read()
- host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py
index b10762eaba..cf054f59fd 100644
--- a/plugins/module_utils/mh/base.py
+++ b/plugins/module_utils/mh/base.py
@@ -15,6 +15,7 @@ from ansible_collections.community.general.plugins.module_utils.mh.deco import m
class ModuleHelperBase(object):
module = None
ModuleHelperException = _MHE
+ # in 12.0.0 add 'debug' to the tuple
_delegated_to_module = (
'check_mode', 'get_bin_path', 'warn', 'deprecate',
)
@@ -28,6 +29,18 @@ class ModuleHelperBase(object):
if not isinstance(self.module, AnsibleModule):
self.module = AnsibleModule(**self.module)
+ # in 12.0.0 remove this if statement entirely
+ if hasattr(self, 'debug'):
+ msg = (
+ "This class ({cls}) has an attribute 'debug' defined and that is deprecated. "
+ "Method 'debug' will be an integral part of ModuleHelper in community.general "
+ "12.0.0, delegated to the underlying AnsibleModule object. "
+ "Please rename the existing attribute to prevent this message from showing.".format(cls=self.__class__.__name__)
+ )
+ self.deprecate(msg, version="12.0.0", collection_name="community.general")
+ else:
+ self._delegated_to_module = self._delegated_to_module + ('debug',)
+
@property
def diff_mode(self):
return self.module._diff
diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py
deleted file mode 100644
index dd879ff4b2..0000000000
--- a/plugins/module_utils/mh/mixins/deps.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright (c) 2020, Ansible Project
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class DependencyCtxMgr(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use plugins/module_utils/deps.py instead.
- """
- def __init__(self, name, msg=None):
- self.name = name
- self.msg = msg
- self.has_it = False
- self.exc_type = None
- self.exc_val = None
- self.exc_tb = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.has_it = exc_type is None
- self.exc_type = exc_type
- self.exc_val = exc_val
- self.exc_tb = exc_tb
- return not self.has_it
-
- @property
- def text(self):
- return self.msg or str(self.exc_val)
diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py
deleted file mode 100644
index 7db9904f93..0000000000
--- a/plugins/module_utils/mh/mixins/vars.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright (c) 2020, Ansible Project
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import copy
-
-
-class VarMeta(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
-
- NOTHING = object()
-
- def __init__(self, diff=False, output=True, change=None, fact=False):
- self.init = False
- self.initial_value = None
- self.value = None
-
- self.diff = diff
- self.change = diff if change is None else change
- self.output = output
- self.fact = fact
-
- def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
- if diff is not None:
- self.diff = diff
- if output is not None:
- self.output = output
- if change is not None:
- self.change = change
- if fact is not None:
- self.fact = fact
- if initial_value is not self.NOTHING:
- self.initial_value = copy.deepcopy(initial_value)
-
- def set_value(self, value):
- if not self.init:
- self.initial_value = copy.deepcopy(value)
- self.init = True
- self.value = value
- return self
-
- @property
- def has_changed(self):
- return self.change and (self.initial_value != self.value)
-
- @property
- def diff_result(self):
- return None if not (self.diff and self.has_changed) else {
- 'before': self.initial_value,
- 'after': self.value,
- }
-
- def __str__(self):
- return "".format(
- self.value, self.initial_value, self.diff, self.output, self.change
- )
-
-
-class VarDict(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
- def __init__(self):
- self._data = dict()
- self._meta = dict()
-
- def __getitem__(self, item):
- return self._data[item]
-
- def __setitem__(self, key, value):
- self.set(key, value)
-
- def __getattr__(self, item):
- try:
- return self._data[item]
- except KeyError:
- return getattr(self._data, item)
-
- def __setattr__(self, key, value):
- if key in ('_data', '_meta'):
- super(VarDict, self).__setattr__(key, value)
- else:
- self.set(key, value)
-
- def meta(self, name):
- return self._meta[name]
-
- def set_meta(self, name, **kwargs):
- self.meta(name).set(**kwargs)
-
- def set(self, name, value, **kwargs):
- if name in ('_data', '_meta'):
- raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
- self._data[name] = value
- if name in self._meta:
- meta = self.meta(name)
- else:
- meta = VarMeta(**kwargs)
- meta.set_value(value)
- self._meta[name] = meta
-
- def output(self):
- return {k: v for k, v in self._data.items() if self.meta(k).output}
-
- def diff(self):
- diff_results = [(k, self.meta(k).diff_result) for k in self._data]
- diff_results = [dr for dr in diff_results if dr[1] is not None]
- if diff_results:
- before = dict((dr[0], dr[1]['before']) for dr in diff_results)
- after = dict((dr[0], dr[1]['after']) for dr in diff_results)
- return {'before': before, 'after': after}
- return None
-
- def facts(self):
- facts_result = {k: v for k, v in self._data.items() if self._meta[k].fact}
- return facts_result if facts_result else None
-
- def change_vars(self):
- return [v for v in self._data if self.meta(v).change]
-
- def has_changed(self, v):
- return self._meta[v].has_changed
-
-
-class VarsMixin(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
- def __init__(self, module=None):
- self.vars = VarDict()
- super(VarsMixin, self).__init__(module)
-
- def update_vars(self, meta=None, **kwargs):
- if meta is None:
- meta = {}
- for k, v in kwargs.items():
- self.vars.set(k, v, **meta)
diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py
index ca95199d9b..f0e2ad6e96 100644
--- a/plugins/module_utils/mh/module_helper.py
+++ b/plugins/module_utils/mh/module_helper.py
@@ -10,13 +10,9 @@ __metaclass__ = type
from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible_collections.community.general.plugins.module_utils.vardict import VarDict as _NewVarDict # remove "as NewVarDict" in 11.0.0
-# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.base import AnsibleModule # noqa: F401 DEPRECATED, remove in 11.0.0
+from ansible_collections.community.general.plugins.module_utils.vardict import VarDict
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
-# (TODO: remove mh.mixins.vars!) pylint: disable-next=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _OldVarDict # noqa: F401 remove in 11.0.0
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
@@ -26,24 +22,11 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
diff_params = ()
change_params = ()
facts_params = ()
- use_old_vardict = True # remove in 11.0.0
- mute_vardict_deprecation = False
def __init__(self, module=None):
- if self.use_old_vardict: # remove first half of the if in 11.0.0
- self.vars = _OldVarDict()
- super(ModuleHelper, self).__init__(module)
- if not self.mute_vardict_deprecation:
- self.module.deprecate(
- "This class is using the old VarDict from ModuleHelper, which is deprecated. "
- "Set the class variable use_old_vardict to False and make the necessary adjustments."
- "The old VarDict class will be removed in community.general 11.0.0",
- version="11.0.0", collection_name="community.general"
- )
- else:
- self.vars = _NewVarDict()
- super(ModuleHelper, self).__init__(module)
+ super(ModuleHelper, self).__init__(module)
+ self.vars = VarDict()
for name, value in self.module.params.items():
self.vars.set(
name, value,
@@ -66,9 +49,6 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
self.update_vars(meta={"fact": True}, **kwargs)
def _vars_changed(self):
- if self.use_old_vardict:
- return any(self.vars.has_changed(v) for v in self.vars.change_vars())
-
return self.vars.has_changed
def has_changed(self):
diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py
index 366699329a..f70ae3515d 100644
--- a/plugins/module_utils/module_helper.py
+++ b/plugins/module_utils/module_helper.py
@@ -11,12 +11,8 @@ __metaclass__ = type
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
ModuleHelper, StateModuleHelper,
- AnsibleModule # remove in 11.0.0
)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 remove in 11.0.0
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 remove in 11.0.0
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.deco import (
cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns,
)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401 remove in 11.0.0
diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py
new file mode 100644
index 0000000000..f0f54cce9d
--- /dev/null
+++ b/plugins/module_utils/pacemaker.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Dexter Le
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+_state_map = {
+ "present": "create",
+ "absent": "remove",
+ "status": "status",
+ "enabled": "enable",
+ "disabled": "disable",
+ "online": "start",
+ "offline": "stop",
+ "maintenance": "set",
+ "config": "config",
+ "cleanup": "cleanup",
+}
+
+
+def fmt_resource_type(value):
+ return [value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None]
+
+
+def fmt_resource_operation(value):
+ cmd = []
+ for op in value:
+ cmd.append("op")
+ cmd.append(op.get('operation_action'))
+ for operation_option in op.get('operation_option'):
+ cmd.append(operation_option)
+
+ return cmd
+
+
+def fmt_resource_argument(value):
+ return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option']
+
+
+def get_pacemaker_maintenance_mode(runner):
+ with runner("cli_action config") as ctx:
+ rc, out, err = ctx.run(cli_action="property")
+ maintenance_mode_output = list(filter(lambda string: "maintenance-mode=true" in string.lower(), out.splitlines()))
+ return bool(maintenance_mode_output)
+
+
+def pacemaker_runner(module, **kwargs):
+ runner_command = ['pcs']
+ runner = CmdRunner(
+ module,
+ command=runner_command,
+ arg_formats=dict(
+ cli_action=cmd_runner_fmt.as_list(),
+ state=cmd_runner_fmt.as_map(_state_map),
+ name=cmd_runner_fmt.as_list(),
+ resource_type=cmd_runner_fmt.as_func(fmt_resource_type),
+ resource_option=cmd_runner_fmt.as_list(),
+ resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation),
+ resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"),
+ resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument),
+ apply_all=cmd_runner_fmt.as_bool("--all"),
+ wait=cmd_runner_fmt.as_opt_eq_val("--wait"),
+ config=cmd_runner_fmt.as_fixed("config"),
+ force=cmd_runner_fmt.as_bool("--force"),
+ ),
+ **kwargs
+ )
+ return runner
diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py
index 513b9081f6..bb37712c21 100644
--- a/plugins/module_utils/pipx.py
+++ b/plugins/module_utils/pipx.py
@@ -6,7 +6,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+
+import json
+
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
pipx_common_argspec = {
@@ -36,23 +40,25 @@ _state_map = dict(
def pipx_runner(module, command, **kwargs):
arg_formats = dict(
- state=fmt.as_map(_state_map),
- name=fmt.as_list(),
- name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])),
- install_apps=fmt.as_bool("--include-apps"),
- install_deps=fmt.as_bool("--include-deps"),
- inject_packages=fmt.as_list(),
- force=fmt.as_bool("--force"),
- include_injected=fmt.as_bool("--include-injected"),
- index_url=fmt.as_opt_val('--index-url'),
- python=fmt.as_opt_val('--python'),
- system_site_packages=fmt.as_bool("--system-site-packages"),
- _list=fmt.as_fixed(['list', '--include-injected', '--json']),
- editable=fmt.as_bool("--editable"),
- pip_args=fmt.as_opt_eq_val('--pip-args'),
- suffix=fmt.as_opt_val('--suffix'),
+ state=cmd_runner_fmt.as_map(_state_map),
+ name=cmd_runner_fmt.as_list(),
+ name_source=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda n, s: [s] if s else [n])),
+ install_apps=cmd_runner_fmt.as_bool("--include-apps"),
+ install_deps=cmd_runner_fmt.as_bool("--include-deps"),
+ inject_packages=cmd_runner_fmt.as_list(),
+ force=cmd_runner_fmt.as_bool("--force"),
+ include_injected=cmd_runner_fmt.as_bool("--include-injected"),
+ index_url=cmd_runner_fmt.as_opt_val('--index-url'),
+ python=cmd_runner_fmt.as_opt_val('--python'),
+ system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"),
+ _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']),
+ editable=cmd_runner_fmt.as_bool("--editable"),
+ pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'),
+ suffix=cmd_runner_fmt.as_opt_val('--suffix'),
+ spec_metadata=cmd_runner_fmt.as_list(),
+ version=cmd_runner_fmt.as_fixed('--version'),
)
- arg_formats["global"] = fmt.as_bool("--global")
+ arg_formats["global"] = cmd_runner_fmt.as_bool("--global")
runner = CmdRunner(
module,
@@ -63,3 +69,53 @@ def pipx_runner(module, command, **kwargs):
**kwargs
)
return runner
+
+
+def _make_entry(venv_name, venv, include_injected, include_deps):
+ entry = {
+ 'name': venv_name,
+ 'version': venv['metadata']['main_package']['package_version'],
+ 'pinned': venv['metadata']['main_package'].get('pinned'),
+ }
+ if include_injected:
+ entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}
+ if include_deps:
+ entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
+ return entry
+
+
+def make_process_dict(include_injected, include_deps=False):
+ def process_dict(rc, out, err):
+ if not out:
+ return {}
+
+ results = {}
+ raw_data = json.loads(out)
+ for venv_name, venv in raw_data['venvs'].items():
+ results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps)
+
+ return results, raw_data
+
+ return process_dict
+
+
+def make_process_list(mod_helper, **kwargs):
+ #
+ # ATTENTION!
+ #
+ # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0
+ #
+ process_dict = make_process_dict(mod_helper, **kwargs)
+
+ def process_list(rc, out, err):
+ res_dict, raw_data = process_dict(rc, out, err)
+
+ if kwargs.get("include_raw"):
+ mod_helper.vars.raw_output = raw_data
+
+ return [
+ entry
+ for name, entry in res_dict.items()
+ if name == kwargs.get("name")
+ ]
+ return process_list
diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py
new file mode 100644
index 0000000000..8e82ffd360
--- /dev/null
+++ b/plugins/module_utils/pkg_req.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import raise_from
+
+from ansible_collections.community.general.plugins.module_utils import deps
+
+
+with deps.declare("packaging"):
+ from packaging.requirements import Requirement
+ from packaging.version import parse as parse_version, InvalidVersion
+
+
+class PackageRequirement:
+ def __init__(self, module, name):
+ self.module = module
+ self.parsed_name, self.requirement = self._parse_spec(name)
+
+ def _parse_spec(self, name):
+ """
+ Parse a package name that may include version specifiers using PEP 508.
+ Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None.
+
+ Example inputs:
+ "package"
+ "package>=1.0"
+ "package>=1.0,<2.0"
+ "package[extra]>=1.0"
+ "package[foo,bar]>=1.0,!=1.5"
+
+ :param name: Package name with optional version specifiers and extras
+ :return: Tuple of (name, requirement)
+ :raises ValueError: If the package specification is invalid
+ """
+ if not name:
+ return name, None
+
+ # Quick check for simple package names
+ if not any(c in name for c in '>=
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-from time import sleep
-
-PROXMOXER_IMP_ERR = None
-try:
- from proxmoxer import ProxmoxAPI
- from proxmoxer import __version__ as proxmoxer_version
- HAS_PROXMOXER = True
-except ImportError:
- HAS_PROXMOXER = False
- PROXMOXER_IMP_ERR = traceback.format_exc()
-
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def proxmox_auth_argument_spec():
- return dict(
- api_host=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_HOST'])
- ),
- api_port=dict(type='int',
- fallback=(env_fallback, ['PROXMOX_PORT'])
- ),
- api_user=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_USER'])
- ),
- api_password=dict(type='str',
- no_log=True,
- fallback=(env_fallback, ['PROXMOX_PASSWORD'])
- ),
- api_token_id=dict(type='str',
- no_log=False
- ),
- api_token_secret=dict(type='str',
- no_log=True
- ),
- validate_certs=dict(type='bool',
- default=False
- ),
- )
-
-
-def proxmox_to_ansible_bool(value):
- '''Convert Proxmox representation of a boolean to be ansible-friendly'''
- return True if value == 1 else False
-
-
-def ansible_to_proxmox_bool(value):
- '''Convert Ansible representation of a boolean to be proxmox-friendly'''
- if value is None:
- return None
-
- if not isinstance(value, bool):
- raise ValueError("%s must be of type bool not %s" % (value, type(value)))
-
- return 1 if value else 0
-
-
-class ProxmoxAnsible(object):
- """Base class for Proxmox modules"""
- TASK_TIMED_OUT = 'timeout expired'
-
- def __init__(self, module):
- if not HAS_PROXMOXER:
- module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
-
- self.module = module
- self.proxmoxer_version = proxmoxer_version
- self.proxmox_api = self._connect()
- # Test token validity
- try:
- self.proxmox_api.version.get()
- except Exception as e:
- module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def _connect(self):
- api_host = self.module.params['api_host']
- api_port = self.module.params['api_port']
- api_user = self.module.params['api_user']
- api_password = self.module.params['api_password']
- api_token_id = self.module.params['api_token_id']
- api_token_secret = self.module.params['api_token_secret']
- validate_certs = self.module.params['validate_certs']
-
- auth_args = {'user': api_user}
-
- if api_port:
- auth_args['port'] = api_port
-
- if api_password:
- auth_args['password'] = api_password
- else:
- if self.proxmoxer_version < LooseVersion('1.1.0'):
- self.module.fail_json('Using "token_name" and "token_value" require proxmoxer>=1.1.0')
- auth_args['token_name'] = api_token_id
- auth_args['token_value'] = api_token_secret
-
- try:
- return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
- except Exception as e:
- self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def version(self):
- try:
- apiversion = self.proxmox_api.version.get()
- return LooseVersion(apiversion['version'])
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e)
-
- def get_node(self, node):
- try:
- nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e)
- return nodes[0] if nodes else None
-
- def get_nextvmid(self):
- try:
- return self.proxmox_api.cluster.nextid.get()
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e)
-
- def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
- try:
- vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e))
-
- if not vms:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='No VM with name %s found' % name)
- elif len(vms) > 1:
- self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name)
-
- return vms[0]
-
- def get_vm(self, vmid, ignore_missing=False):
- try:
- vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e))
-
- if vms:
- return vms[0]
- else:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
-
- def api_task_ok(self, node, taskid):
- try:
- status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e))
-
- def api_task_complete(self, node_name, task_id, timeout):
- """Wait until the task stops or times out.
-
- :param node_name: Proxmox node name where the task is running.
- :param task_id: ID of the running task.
- :param timeout: Timeout in seconds to wait for the task to complete.
- :return: Task completion status (True/False) and ``exitstatus`` message when status=False.
- """
- status = {}
- while timeout:
- try:
- status = self.proxmox_api.nodes(node_name).tasks(task_id).status.get()
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node_name, e))
-
- if status['status'] == 'stopped':
- if status['exitstatus'] == 'OK':
- return True, None
- else:
- return False, status['exitstatus']
- else:
- timeout -= 1
- if timeout <= 0:
- return False, ProxmoxAnsible.TASK_TIMED_OUT
- sleep(1)
-
- def get_pool(self, poolid):
- """Retrieve pool information
-
- :param poolid: str - name of the pool
- :return: dict - pool information
- """
- try:
- return self.proxmox_api.pools(poolid).get()
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve pool %s information: %s" % (poolid, e))
-
- def get_storages(self, type):
- """Retrieve storages information
-
- :param type: str, optional - type of storages
- :return: list of dicts - array of storages
- """
- try:
- return self.proxmox_api.storage.get(type=type)
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve storages information with type %s: %s" % (type, e))
-
- def get_storage_content(self, node, storage, content=None, vmid=None):
- try:
- return (
- self.proxmox_api.nodes(node)
- .storage(storage)
- .content()
- .get(content=content, vmid=vmid)
- )
- except Exception as e:
- self.module.fail_json(
- msg="Unable to list content on %s, %s for %s and %s: %s"
- % (node, storage, content, vmid, e)
- )
diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py
index e06683b3ee..251d5618d3 100644
--- a/plugins/module_utils/puppet.py
+++ b/plugins/module_utils/puppet.py
@@ -95,10 +95,7 @@ def puppet_runner(module):
skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
noop=cmd_runner_fmt.as_func(noop_func),
- use_srv_records=cmd_runner_fmt.as_map({
- True: "--usr_srv_records",
- False: "--no-usr_srv_records",
- }),
+ use_srv_records=cmd_runner_fmt.as_bool("--usr_srv_records", "--no-usr_srv_records", ignore_none=True),
logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]),
modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"),
_execute=cmd_runner_fmt.as_func(execute_func),
diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py
index b65867c61e..a8e9e651be 100644
--- a/plugins/module_utils/python_runner.py
+++ b/plugins/module_utils/python_runner.py
@@ -19,7 +19,7 @@ class PythonRunner(CmdRunner):
self.venv = venv
self.has_venv = venv is not None
- if (os.path.isabs(python) or '/' in python):
+ if os.path.isabs(python) or '/' in python:
self.python = python
elif self.has_venv:
if path_prefix is None:
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py
index 1836bfc7b7..bc93f0e498 100644
--- a/plugins/module_utils/redfish_utils.py
+++ b/plugins/module_utils/redfish_utils.py
@@ -10,9 +10,7 @@ import json
import os
import random
import string
-import gzip
import time
-from io import BytesIO
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
@@ -21,8 +19,6 @@ from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import http_client
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.ansible_release import __version__ as ansible_version
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
@@ -38,6 +34,21 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\
'than one %(resource)s is no longer allowed. Use the `resource_id` '\
'option to specify the target %(resource)s ID.'
+# Use together with the community.general.redfish docs fragment
+REDFISH_COMMON_ARGUMENT_SPEC = {
+ "validate_certs": {
+ "type": "bool",
+ "default": False,
+ },
+ "ca_path": {
+ "type": "path",
+ },
+ "ciphers": {
+ "type": "list",
+ "elements": "str",
+ },
+}
+
class RedfishUtils(object):
@@ -53,9 +64,10 @@ class RedfishUtils(object):
self.resource_id = resource_id
self.data_modification = data_modification
self.strip_etag_quotes = strip_etag_quotes
- self.ciphers = ciphers
+ self.ciphers = ciphers if ciphers is not None else module.params.get("ciphers")
self._vendor = None
- self._init_session()
+ self.validate_certs = module.params.get("validate_certs", False)
+ self.ca_path = module.params.get("ca_path")
def _auth_params(self, headers):
"""
@@ -120,7 +132,7 @@ class RedfishUtils(object):
# Note: This is also a fallthrough for properties that are
# arrays of objects. Some services erroneously omit properties
- # within arrays of objects when not configured, and it's
+ # within arrays of objects when not configured, and it is
# expecting the client to provide them anyway.
if req_pyld[prop] != cur_pyld[prop]:
@@ -133,6 +145,17 @@ class RedfishUtils(object):
resp['msg'] = 'Properties in %s are already set' % uri
return resp
+ def _request(self, uri, **kwargs):
+ kwargs.setdefault("validate_certs", self.validate_certs)
+ kwargs.setdefault("follow_redirects", "all")
+ kwargs.setdefault("use_proxy", True)
+ kwargs.setdefault("timeout", self.timeout)
+ kwargs.setdefault("ciphers", self.ciphers)
+ kwargs.setdefault("ca_path", self.ca_path)
+ resp = open_url(uri, **kwargs)
+ headers = {k.lower(): v for (k, v) in resp.info().items()}
+ return resp, headers
+
# The following functions are to send GET/POST/PATCH/DELETE requests
def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None):
req_headers = dict(GET_HEADERS)
@@ -146,19 +169,17 @@ class RedfishUtils(object):
# in case the caller will be using sessions later.
if uri == (self.root_uri + self.service_root):
basic_auth = False
- resp = open_url(uri, method="GET", headers=req_headers,
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=timeout, ciphers=self.ciphers)
- headers = {k.lower(): v for (k, v) in resp.info().items()}
+ resp, headers = self._request(
+ uri,
+ method="GET",
+ headers=req_headers,
+ url_username=username,
+ url_password=password,
+ force_basic_auth=basic_auth,
+ timeout=timeout,
+ )
try:
- if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'):
- # Older versions of Ansible do not automatically decompress the data
- # Starting in 2.14, open_url will decompress the response data by default
- data = json.loads(to_native(gzip.open(BytesIO(resp.read()), 'rt', encoding='utf-8').read()))
- else:
- data = json.loads(to_native(resp.read()))
+ data = json.loads(to_native(resp.read()))
except Exception as e:
# No response data; this is okay in certain cases
data = None
@@ -195,18 +216,20 @@ class RedfishUtils(object):
req_headers['content-type'] = multipart_encoder[1]
else:
data = json.dumps(pyld)
- resp = open_url(uri, data=data,
- headers=req_headers, method="POST",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout, ciphers=self.ciphers)
+ resp, headers = self._request(
+ uri,
+ data=data,
+ headers=req_headers,
+ method="POST",
+ url_username=username,
+ url_password=password,
+ force_basic_auth=basic_auth,
+ )
try:
data = json.loads(to_native(resp.read()))
except Exception as e:
# No response data; this is okay in many cases
data = None
- headers = {k.lower(): v for (k, v) in resp.info().items()}
except HTTPError as e:
msg, data = self._get_extended_message(e)
return {'ret': False,
@@ -249,12 +272,15 @@ class RedfishUtils(object):
username, password, basic_auth = self._auth_params(req_headers)
try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=req_headers, method="PATCH",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout, ciphers=self.ciphers)
+ resp, dummy = self._request(
+ uri,
+ data=json.dumps(pyld),
+ headers=req_headers,
+ method="PATCH",
+ url_username=username,
+ url_password=password,
+ force_basic_auth=basic_auth,
+ )
except HTTPError as e:
msg, data = self._get_extended_message(e)
return {'ret': False, 'changed': False,
@@ -284,12 +310,15 @@ class RedfishUtils(object):
req_headers['If-Match'] = etag
username, password, basic_auth = self._auth_params(req_headers)
try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=req_headers, method="PUT",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout, ciphers=self.ciphers)
+ resp, dummy = self._request(
+ uri,
+ data=json.dumps(pyld),
+ headers=req_headers,
+ method="PUT",
+ url_username=username,
+ url_password=password,
+ force_basic_auth=basic_auth,
+ )
except HTTPError as e:
msg, data = self._get_extended_message(e)
return {'ret': False,
@@ -310,12 +339,15 @@ class RedfishUtils(object):
username, password, basic_auth = self._auth_params(req_headers)
try:
data = json.dumps(pyld) if pyld else None
- resp = open_url(uri, data=data,
- headers=req_headers, method="DELETE",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout, ciphers=self.ciphers)
+ resp, dummy = self._request(
+ uri,
+ data=data,
+ headers=req_headers,
+ method="DELETE",
+ url_username=username,
+ url_password=password,
+ force_basic_auth=basic_auth,
+ )
except HTTPError as e:
msg, data = self._get_extended_message(e)
return {'ret': False,
@@ -410,9 +442,6 @@ class RedfishUtils(object):
pass
return msg, data
- def _init_session(self):
- pass
-
def _get_vendor(self):
# If we got the vendor info once, don't get it again
if self._vendor is not None:
@@ -1120,7 +1149,8 @@ class RedfishUtils(object):
key = "Actions"
reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
'GracefulRestart', 'ForceRestart', 'Nmi',
- 'ForceOn', 'PushPowerButton', 'PowerCycle']
+ 'ForceOn', 'PushPowerButton', 'PowerCycle',
+ 'FullPowerCycle']
# command should be PowerOn, PowerForceOff, etc.
if not command.startswith('Power'):
@@ -1178,7 +1208,7 @@ class RedfishUtils(object):
return response
# If requested to wait for the service to be available again, block
- # until it's ready
+ # until it is ready
if wait:
elapsed_time = 0
start_time = time.time()
@@ -1191,7 +1221,7 @@ class RedfishUtils(object):
while elapsed_time <= wait_timeout:
status = self.check_service_availability()
if status['available']:
- # It's available; we're done
+ # It is available; we are done
break
time.sleep(5)
elapsed_time = time.time() - start_time
@@ -1557,6 +1587,27 @@ class RedfishUtils(object):
resp['msg'] = 'Modified account service'
return resp
+ def update_user_accounttypes(self, user):
+ account_types = user.get('account_accounttypes')
+ oemaccount_types = user.get('account_oemaccounttypes')
+ if account_types is None and oemaccount_types is None:
+ return {'ret': False, 'msg':
+ 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+
+ uri = response['uri']
+ payload = {}
+ if user.get('account_accounttypes'):
+ payload['AccountTypes'] = user.get('account_accounttypes')
+ if user.get('account_oemaccounttypes'):
+ payload['OEMAccountTypes'] = user.get('account_oemaccounttypes')
+
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
def check_password_change_required(self, return_data):
"""
Checks a response if a user needs to change their password
@@ -1793,7 +1844,7 @@ class RedfishUtils(object):
operation_results['status'] = data.get('TaskState', data.get('JobState'))
operation_results['messages'] = data.get('Messages', [])
else:
- # Error response body, which is a bit of a misnomer since it's used in successful action responses
+ # Error response body, which is a bit of a misnomer since it is used in successful action responses
operation_results['status'] = 'Completed'
if response.status >= 400:
operation_results['status'] = 'Exception'
@@ -1912,6 +1963,9 @@ class RedfishUtils(object):
targets = update_opts.get('update_targets')
apply_time = update_opts.get('update_apply_time')
oem_params = update_opts.get('update_oem_params')
+ custom_oem_header = update_opts.get('update_custom_oem_header')
+ custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type')
+ custom_oem_params = update_opts.get('update_custom_oem_params')
# Ensure the image file is provided
if not image_file:
@@ -1948,6 +2002,11 @@ class RedfishUtils(object):
'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'},
'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'}
}
+ if custom_oem_params:
+ multipart_payload[custom_oem_header] = {'content': custom_oem_params}
+ if custom_oem_mime_type:
+ multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type
+
response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True)
if response['ret'] is False:
return response
@@ -3923,3 +3982,38 @@ class RedfishUtils(object):
"rsp_uri": rsp_uri
}
return res
+
+ def get_accountservice_properties(self):
+ # Find the AccountService resource
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ accountservice_uri = data.get("AccountService", {}).get("@odata.id")
+ if accountservice_uri is None:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+
+ response = self.get_request(self.root_uri + accountservice_uri)
+ if response['ret'] is False:
+ return response
+ return {
+ 'ret': True,
+ 'entries': response['data']
+ }
+
+ def get_power_restore_policy(self, systems_uri):
+ # Retrieve System resource
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ return {
+ 'ret': True,
+ 'entries': response['data']['PowerRestorePolicy']
+ }
+
+ def get_multi_power_restore_policy(self):
+ return self.aggregate_systems(self.get_power_restore_policy)
+
+ def set_power_restore_policy(self, policy):
+ body = {'PowerRestorePolicy': policy}
+ return self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True)
diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py
deleted file mode 100644
index 321386a0a5..0000000000
--- a/plugins/module_utils/redhat.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), James Laska
-#
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import os
-import shutil
-import tempfile
-
-from ansible.module_utils.six.moves import configparser
-
-
-class RegistrationBase(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 10.0.0.
- There is no replacement for it; please contact the community.general
- maintainers in case you are using it.
- """
-
- def __init__(self, module, username=None, password=None):
- self.module = module
- self.username = username
- self.password = password
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- redhat_repo = '/etc/yum.repos.d/redhat.repo'
- if os.path.isfile(redhat_repo):
- os.unlink(redhat_repo)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update_plugin_conf(self, plugin, enabled=True):
- plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
-
- if os.path.isfile(plugin_conf):
- tmpfd, tmpfile = tempfile.mkstemp()
- shutil.copy2(plugin_conf, tmpfile)
- cfg = configparser.ConfigParser()
- cfg.read([tmpfile])
-
- if enabled:
- cfg.set('main', 'enabled', 1)
- else:
- cfg.set('main', 'enabled', 0)
-
- fd = open(tmpfile, 'w+')
- cfg.write(fd)
- fd.close()
- self.module.atomic_move(tmpfile, plugin_conf)
-
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py
index 253269b9a9..e55a3a13a5 100644
--- a/plugins/module_utils/snap.py
+++ b/plugins/module_utils/snap.py
@@ -41,8 +41,15 @@ def snap_runner(module, **kwargs):
options=cmd_runner_fmt.as_list(),
info=cmd_runner_fmt.as_fixed("info"),
dangerous=cmd_runner_fmt.as_bool("--dangerous"),
+ version=cmd_runner_fmt.as_fixed("version"),
),
check_rc=False,
**kwargs
)
return runner
+
+
+def get_version(runner):
+ with runner("version") as ctx:
+ rc, out, err = ctx.run()
+ return dict(x.split() for x in out.splitlines() if len(x.split()) == 2)
diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py
new file mode 100644
index 0000000000..5d74118d12
--- /dev/null
+++ b/plugins/module_utils/systemd.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marco Noce
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+def systemd_runner(module, command, **kwargs):
+ arg_formats = dict(
+ version=cmd_runner_fmt.as_fixed("--version"),
+ list_units=cmd_runner_fmt.as_fixed(["list-units", "--no-pager"]),
+ types=cmd_runner_fmt.as_func(lambda v: [] if not v else ["--type", ",".join(v)]),
+ all=cmd_runner_fmt.as_fixed("--all"),
+ plain=cmd_runner_fmt.as_fixed("--plain"),
+ no_legend=cmd_runner_fmt.as_fixed("--no-legend"),
+ show=cmd_runner_fmt.as_fixed("show"),
+ props=cmd_runner_fmt.as_func(lambda v: [] if not v else ["-p", ",".join(v)]),
+ dashdash=cmd_runner_fmt.as_fixed("--"),
+ unit=cmd_runner_fmt.as_list(),
+ )
+
+ runner = CmdRunner(
+ module,
+ command=command,
+ arg_formats=arg_formats,
+ check_rc=True,
+ **kwargs
+ )
+ return runner
diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py
new file mode 100644
index 0000000000..f84b9ef7ea
--- /dev/null
+++ b/plugins/module_utils/xdg_mime.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marcos Alano
+# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+def xdg_mime_runner(module, **kwargs):
+ return CmdRunner(
+ module,
+ command=['xdg-mime'],
+ arg_formats=dict(
+ default=cmd_runner_fmt.as_fixed('default'),
+ query=cmd_runner_fmt.as_fixed('query'),
+ mime_types=cmd_runner_fmt.as_list(),
+ handler=cmd_runner_fmt.as_list(),
+ version=cmd_runner_fmt.as_fixed('--version'),
+ ),
+ **kwargs
+ )
+
+
+def xdg_mime_get(runner, mime_type):
+ def process(rc, out, err):
+ if not out.strip():
+ return None
+ out = out.splitlines()[0]
+ return out.split()[-1]
+
+ with runner("query default mime_types", output_process=process) as ctx:
+ return ctx.run(mime_types=mime_type)
diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py
index b63518d0c4..344bd1f3c9 100644
--- a/plugins/module_utils/xfconf.py
+++ b/plugins/module_utils/xfconf.py
@@ -7,10 +7,10 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.parsing.convert_bool import boolean
-from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
-@fmt.unpack_args
+@cmd_runner_fmt.unpack_args
def _values_fmt(values, value_types):
result = []
for value, value_type in zip(values, value_types):
@@ -25,14 +25,21 @@ def xfconf_runner(module, **kwargs):
module,
command='xfconf-query',
arg_formats=dict(
- channel=fmt.as_opt_val("--channel"),
- property=fmt.as_opt_val("--property"),
- force_array=fmt.as_bool("--force-array"),
- reset=fmt.as_bool("--reset"),
- create=fmt.as_bool("--create"),
- list_arg=fmt.as_bool("--list"),
- values_and_types=fmt.as_func(_values_fmt),
+ channel=cmd_runner_fmt.as_opt_val("--channel"),
+ property=cmd_runner_fmt.as_opt_val("--property"),
+ force_array=cmd_runner_fmt.as_bool("--force-array"),
+ reset=cmd_runner_fmt.as_bool("--reset"),
+ create=cmd_runner_fmt.as_bool("--create"),
+ list_arg=cmd_runner_fmt.as_bool("--list"),
+ values_and_types=_values_fmt,
+ version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
)
return runner
+
+
+def get_xfconf_version(runner):
+ with runner("version") as ctx:
+ rc, out, err = ctx.run()
+ return out.splitlines()[0].split()[1]
diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py
index 1eee5b1a2f..d9440fdb4e 100644
--- a/plugins/modules/aerospike_migrations.py
+++ b/plugins/modules/aerospike_migrations.py
@@ -9,15 +9,14 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: aerospike_migrations
short_description: Check or wait for migrations between nodes
description:
- - This can be used to check for migrations in a cluster.
- This makes it easy to do a rolling upgrade/update on Aerospike nodes.
- - If waiting for migrations is not desired, simply just poll until
- port 3000 if available or asinfo -v status returns ok
+ - This can be used to check for migrations in a cluster. This makes it easy to do a rolling upgrade/update on Aerospike
+ nodes.
+ - If waiting for migrations is not desired, simply just poll until port 3000 is available or C(asinfo -v status) returns
+ ok.
author: "Albert Autin (@Alb0t)"
extends_documentation_fragment:
- community.general.attributes
@@ -27,92 +26,83 @@ attributes:
diff_mode:
support: none
options:
- host:
- description:
- - Which host do we use as seed for info connection
- required: false
- type: str
- default: localhost
- port:
- description:
- - Which port to connect to Aerospike on (service port)
- required: false
- type: int
- default: 3000
- connect_timeout:
- description:
- - How long to try to connect before giving up (milliseconds)
- required: false
- type: int
- default: 1000
- consecutive_good_checks:
- description:
- - How many times should the cluster report "no migrations"
- consecutively before returning OK back to ansible?
- required: false
- type: int
- default: 3
- sleep_between_checks:
- description:
- - How long to sleep between each check (seconds).
- required: false
- type: int
- default: 60
- tries_limit:
- description:
- - How many times do we poll before giving up and failing?
- default: 300
- required: false
- type: int
- local_only:
- description:
- - Do you wish to only check for migrations on the local node
- before returning, or do you want all nodes in the cluster
- to finish before returning?
- required: true
- type: bool
- min_cluster_size:
- description:
- - Check will return bad until cluster size is met
- or until tries is exhausted
- required: false
- type: int
- default: 1
- fail_on_cluster_change:
- description:
- - Fail if the cluster key changes
- if something else is changing the cluster, we may want to fail
- required: false
- type: bool
- default: true
- migrate_tx_key:
- description:
- - The metric key used to determine if we have tx migrations
- remaining. Changeable due to backwards compatibility.
- required: false
- type: str
- default: migrate_tx_partitions_remaining
- migrate_rx_key:
- description:
- - The metric key used to determine if we have rx migrations
- remaining. Changeable due to backwards compatibility.
- required: false
- type: str
- default: migrate_rx_partitions_remaining
- target_cluster_size:
- description:
- - When all aerospike builds in the cluster are greater than
- version 4.3, then the C(cluster-stable) info command will be used.
- Inside this command, you can optionally specify what the target
- cluster size is - but it is not necessary. You can still rely on
- min_cluster_size if you don't want to use this option.
- - If this option is specified on a cluster that has at least 1
- host <4.3 then it will be ignored until the min version reaches
- 4.3.
- required: false
- type: int
-'''
-EXAMPLES = '''
+ host:
+ description:
+ - Which host do we use as seed for info connection.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port).
+ required: false
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds).
+ required: false
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations" consecutively before returning OK back to Ansible?
+ required: false
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: false
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: false
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: true
+ type: bool
+ min_cluster_size:
+ description:
+ - Check fails until cluster size is met or until tries is exhausted.
+ required: false
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+ - Fail if the cluster key changes. If something else is changing the cluster, we may want to fail.
+ required: false
+ type: bool
+ default: true
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations remaining. Changeable due to backwards compatibility.
+ required: false
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations remaining. Changeable due to backwards compatibility.
+ required: false
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+ - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command is
+ used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary.
+ You can still rely on O(min_cluster_size) if you do not want to use this option.
+ - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version
+ reaches 4.3.
+ required: false
+ type: int
+"""
+
+EXAMPLES = r"""
# check for migrations on local node
- name: Wait for migrations on local node before proceeding
community.general.aerospike_migrations:
@@ -132,13 +122,13 @@ EXAMPLES = '''
- name: Install dependencies
ansible.builtin.apt:
name:
- - python
- - python-pip
- - python-setuptools
+ - python
+ - python-pip
+ - python-setuptools
state: latest
- name: Setup aerospike
ansible.builtin.pip:
- name: aerospike
+ name: aerospike
# check for migrations every (sleep_between_checks)
# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
# Will exit if any exception, which can be caused by bad nodes,
@@ -147,13 +137,13 @@ EXAMPLES = '''
# Tries Limit * Sleep Between Checks * delay * retries
- name: Wait for aerospike migrations
community.general.aerospike_migrations:
- local_only: true
- sleep_between_checks: 1
- tries_limit: 5
- consecutive_good_checks: 3
- fail_on_cluster_change: true
- min_cluster_size: 3
- target_cluster_size: 4
+ local_only: true
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
register: migrations_check
until: migrations_check is succeeded
changed_when: false
@@ -161,14 +151,14 @@ EXAMPLES = '''
retries: 120
- name: Another thing
ansible.builtin.shell: |
- echo foo
+ echo foo
- name: Reboot
ansible.builtin.reboot:
-'''
+"""
-RETURN = '''
+RETURN = r"""
# Returns only a success/failure result. Changed is always false.
-'''
+"""
import traceback
@@ -189,19 +179,19 @@ else:
def run_module():
"""run ansible module"""
module_args = dict(
- host=dict(type='str', required=False, default='localhost'),
- port=dict(type='int', required=False, default=3000),
- connect_timeout=dict(type='int', required=False, default=1000),
- consecutive_good_checks=dict(type='int', required=False, default=3),
- sleep_between_checks=dict(type='int', required=False, default=60),
- tries_limit=dict(type='int', required=False, default=300),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=3000),
+ connect_timeout=dict(type='int', default=1000),
+ consecutive_good_checks=dict(type='int', default=3),
+ sleep_between_checks=dict(type='int', default=60),
+ tries_limit=dict(type='int', default=300),
local_only=dict(type='bool', required=True),
- min_cluster_size=dict(type='int', required=False, default=1),
- target_cluster_size=dict(type='int', required=False, default=None),
- fail_on_cluster_change=dict(type='bool', required=False, default=True),
- migrate_tx_key=dict(type='str', required=False, no_log=False,
+ min_cluster_size=dict(type='int', default=1),
+ target_cluster_size=dict(type='int'),
+ fail_on_cluster_change=dict(type='bool', default=True),
+ migrate_tx_key=dict(type='str', no_log=False,
default="migrate_tx_partitions_remaining"),
- migrate_rx_key=dict(type='str', required=False, no_log=False,
+ migrate_rx_key=dict(type='str', no_log=False,
default="migrate_rx_partitions_remaining")
)
diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py
index bad1b2c9d4..0fe04f21d6 100644
--- a/plugins/modules/airbrake_deployment.py
+++ b/plugins/modules/airbrake_deployment.py
@@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: airbrake_deployment
author:
-- "Bruce Pennypacker (@bpennypacker)"
-- "Patrick Humpal (@phumpal)"
+ - "Bruce Pennypacker (@bpennypacker)"
+ - "Patrick Humpal (@phumpal)"
short_description: Notify airbrake about app deployments
description:
- - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+ - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,7 +27,7 @@ attributes:
options:
project_id:
description:
- - Airbrake PROJECT_ID
+ - Airbrake PROJECT_ID.
required: true
type: str
version_added: '0.2.0'
@@ -40,27 +39,27 @@ options:
version_added: '0.2.0'
environment:
description:
- - The airbrake environment name, typically 'production', 'staging', etc.
+ - The airbrake environment name, typically V(production), V(staging), and so on.
required: true
type: str
user:
description:
- - The username of the person doing the deployment
+ - The username of the person doing the deployment.
required: false
type: str
repo:
description:
- - URL of the project repository
+ - URL of the project repository.
required: false
type: str
revision:
description:
- - A hash, number, tag, or other identifier showing what revision from version control was deployed
+ - A hash, number, tag, or other identifier showing what revision from version control was deployed.
required: false
type: str
version:
description:
- - A string identifying what version was deployed
+ - A string identifying what version was deployed.
required: false
type: str
version_added: '1.0.0'
@@ -72,16 +71,16 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates for the target url will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
+ sites using self-signed certificates.
required: false
default: true
type: bool
requirements: []
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Notify airbrake about an app deployment
community.general.airbrake_deployment:
project_id: '12345'
@@ -98,7 +97,7 @@ EXAMPLES = '''
user: ansible
revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
version: '0.2.0'
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -115,11 +114,11 @@ def main():
project_id=dict(required=True, no_log=True, type='str'),
project_key=dict(required=True, no_log=True, type='str'),
environment=dict(required=True, type='str'),
- user=dict(required=False, type='str'),
- repo=dict(required=False, type='str'),
- revision=dict(required=False, type='str'),
- version=dict(required=False, type='str'),
- url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ user=dict(type='str'),
+ repo=dict(type='str'),
+ revision=dict(type='str'),
+ version=dict(type='str'),
+ url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True,
diff --git a/plugins/modules/aix_devices.py b/plugins/modules/aix_devices.py
index a0f3cf48d9..68dbfb72d2 100644
--- a/plugins/modules/aix_devices.py
+++ b/plugins/modules/aix_devices.py
@@ -8,14 +8,13 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
-- Kairo Araujo (@kairoaraujo)
+ - Kairo Araujo (@kairoaraujo)
module: aix_devices
short_description: Manages AIX devices
description:
-- This module discovers, defines, removes and modifies attributes of AIX devices.
+ - This module discovers, defines, removes and modifies attributes of AIX devices.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,35 +25,35 @@ attributes:
options:
attributes:
description:
- - A list of device attributes.
+ - A list of device attributes.
type: dict
device:
description:
- - The name of the device.
- - V(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
+ - The name of the device.
+ - V(all) is valid to rescan C(available) all devices (AIX C(cfgmgr) command).
type: str
force:
description:
- - Forces action.
+ - Forces action.
type: bool
default: false
recursive:
description:
- - Removes or defines a device and children devices.
+ - Removes or defines a device and children devices.
type: bool
default: false
state:
description:
- - Controls the device state.
- - V(available) (alias V(present)) rescan a specific device or all devices (when O(device) is not specified).
- - V(removed) (alias V(absent) removes a device.
- - V(defined) changes device to Defined state.
+ - Controls the device state.
+ - V(available) (alias V(present)) rescan a specific device or all devices (when O(device) is not specified).
+ - V(removed) (alias V(absent)) removes a device.
+ - V(defined) changes device to Defined state.
type: str
- choices: [ available, defined, removed ]
+ choices: [available, defined, removed]
default: available
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Scan new devices
community.general.aix_devices:
device: all
@@ -126,9 +125,9 @@ EXAMPLES = r'''
attributes:
alias4: 10.0.0.100,255.255.255.0
state: available
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py
index 4a3775c672..92a734e8ac 100644
--- a/plugins/modules/aix_filesystem.py
+++ b/plugins/modules/aix_filesystem.py
@@ -9,15 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- Kairo Araujo (@kairoaraujo)
module: aix_filesystem
short_description: Configure LVM and NFS file systems for AIX
description:
- - This module creates, removes, mount and unmount LVM and NFS file system for
- AIX using C(/etc/filesystems).
+ - This module creates, removes, mounts and unmounts LVM and NFS file systems for AIX using C(/etc/filesystems).
- For LVM file systems is possible to resize a file system.
extends_documentation_fragment:
- community.general.attributes
@@ -49,7 +47,7 @@ options:
description:
- Logical volume (LV) device name or remote export device to create a NFS file system.
- It is used to create a file system on an already existing logical volume or the exported NFS file system.
- - If not mentioned a new logical volume name will be created following AIX standards (LVM).
+ - If not mentioned a new logical volume name is created following AIX standards (LVM).
type: str
fs_type:
description:
@@ -60,7 +58,7 @@ options:
description:
- Set file system permissions. V(rw) (read-write) or V(ro) (read-only).
type: str
- choices: [ ro, rw ]
+ choices: [ro, rw]
default: rw
mount_group:
description:
@@ -83,15 +81,14 @@ options:
size:
description:
- Specifies the file system size.
- - For already V(present) it will be resized.
- - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
- it will be in Megabytes. If the value has G specified it will be in
- Gigabytes.
- - If no M or G the value will be 512-byte blocks.
- - If "+" is specified in begin of value, the value will be added.
- - If "-" is specified in begin of value, the value will be removed.
- - If "+" or "-" is not specified, the total value will be the specified.
- - Size will respects the LVM AIX standards.
+ - If the file system is already present, it is resized.
+ - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified
+ it is in gigabytes.
+ - If no M or G the value is 512-byte blocks.
+ - If V(+) is specified in begin of value, the value is added.
+ - If V(-) is specified in begin of value, the value is removed.
+ - If neither V(+) nor V(-) is specified, then the specified value is used as the total value.
+ - Size respects the LVM AIX standards.
type: str
state:
description:
@@ -101,7 +98,7 @@ options:
- V(mounted) checks if the file system is mounted or mount the file system.
- V(unmounted) check if the file system is unmounted or unmount the file system.
type: str
- choices: [ absent, mounted, present, unmounted ]
+ choices: [absent, mounted, present, unmounted]
default: present
vg:
description:
@@ -109,9 +106,9 @@ options:
type: str
notes:
- For more O(attributes), please check "crfs" AIX manual.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create filesystem in a previously defined logical volume.
community.general.aix_filesystem:
device: testlv
@@ -166,18 +163,8 @@ EXAMPLES = r'''
filesystem: /newfs
rm_mount_point: true
state: absent
-'''
+"""
-RETURN = r'''
-changed:
- description: Return changed for aix_filesystems actions as true or false.
- returned: always
- type: bool
-msg:
- description: Return message regarding the action.
- returned: always
- type: str
-'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._mount import ismount
diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py
index 79336bab8d..ece4e95547 100644
--- a/plugins/modules/aix_inittab.py
+++ b/plugins/modules/aix_inittab.py
@@ -8,16 +8,15 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- - Joris Weijters (@molekuul)
+ - Joris Weijters (@molekuul)
module: aix_inittab
-short_description: Manages the inittab on AIX
+short_description: Manages the C(inittab) on AIX
description:
- - Manages the inittab on AIX.
+ - Manages the C(inittab) on AIX.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -26,56 +25,56 @@ attributes:
options:
name:
description:
- - Name of the inittab entry.
+ - Name of the C(inittab) entry.
type: str
required: true
- aliases: [ service ]
+ aliases: [service]
runlevel:
description:
- - Runlevel of the entry.
+ - Runlevel of the entry.
type: str
required: true
action:
description:
- - Action what the init has to do with this entry.
+ - Action what the init has to do with this entry.
type: str
choices:
- - boot
- - bootwait
- - hold
- - initdefault
- - 'off'
- - once
- - ondemand
- - powerfail
- - powerwait
- - respawn
- - sysinit
- - wait
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
command:
description:
- - What command has to run.
+ - What command has to run.
type: str
required: true
insertafter:
description:
- - After which inittabline should the new entry inserted.
+ - After which C(inittab) line the new entry should be inserted.
type: str
state:
description:
- - Whether the entry should be present or absent in the inittab file.
+ - Whether the entry should be present or absent in the inittab file.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
notes:
- The changes are persistent across reboots.
- You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
- Tested on AIX 7.1.
requirements:
-- itertools
-'''
+ - itertools
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
community.general.aix_inittab:
@@ -105,25 +104,15 @@ EXAMPLES = '''
command: echo hello
state: absent
become: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
name:
- description: Name of the adjusted inittab entry
- returned: always
- type: str
- sample: startmyservice
-msg:
- description: Action done with the inittab entry
- returned: changed
- type: str
- sample: changed inittab entry startmyservice
-changed:
- description: Whether the inittab changed or not
- returned: always
- type: bool
- sample: true
-'''
+ description: Name of the adjusted C(inittab) entry.
+ returned: always
+ type: str
+ sample: startmyservice
+"""
# Import necessary libraries
try:
diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py
index 2892a68ad9..c41e21124e 100644
--- a/plugins/modules/aix_lvg.py
+++ b/plugins/modules/aix_lvg.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- Kairo Araujo (@kairoaraujo)
module: aix_lvg
@@ -26,43 +25,43 @@ attributes:
options:
force:
description:
- - Force volume group creation.
+ - Force volume group creation.
type: bool
default: false
pp_size:
description:
- - The size of the physical partition in megabytes.
+ - The size of the physical partition in megabytes.
type: int
pvs:
description:
- - List of comma-separated devices to use as physical devices in this volume group.
- - Required when creating or extending (V(present) state) the volume group.
- - If not informed reducing (V(absent) state) the volume group will be removed.
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or extending (V(present) state) the volume group.
+ - If not informed reducing (V(absent) state) the volume group is removed.
type: list
elements: str
state:
description:
- - Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff).
+ - Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff).
type: str
- choices: [ absent, present, varyoff, varyon ]
+ choices: [absent, present, varyoff, varyon]
default: present
vg:
description:
- - The name of the volume group.
+ - The name of the volume group.
type: str
required: true
vg_type:
description:
- - The type of the volume group.
+ - The type of the volume group.
type: str
- choices: [ big, normal, scalable ]
+ choices: [big, normal, scalable]
default: normal
notes:
-- AIX will permit remove VG only if all LV/Filesystems are not busy.
-- Module does not modify PP size for already present volume group.
-'''
+ - AIX allows removing VG only if all LV/Filesystems are not busy.
+ - Module does not modify PP size for already present volume group.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a volume group datavg
community.general.aix_lvg:
vg: datavg
@@ -86,9 +85,9 @@ EXAMPLES = r'''
vg: rootvg
pvs: hdisk1
state: absent
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py
index 7d0fb1ee09..5e34d0697b 100644
--- a/plugins/modules/aix_lvol.py
+++ b/plugins/modules/aix_lvol.py
@@ -9,10 +9,9 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- - Alain Dejoux (@adejoux)
+ - Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
@@ -27,58 +26,58 @@ attributes:
options:
vg:
description:
- - The volume group this logical volume is part of.
+ - The volume group this logical volume is part of.
type: str
required: true
lv:
description:
- - The name of the logical volume.
+ - The name of the logical volume.
type: str
required: true
lv_type:
description:
- - The type of the logical volume.
+ - The type of the logical volume.
type: str
default: jfs2
size:
description:
- - The size of the logical volume with one of the [MGT] units.
+ - The size of the logical volume with one of the [MGT] units.
type: str
copies:
description:
- - The number of copies of the logical volume.
- - Maximum copies are 3.
+ - The number of copies of the logical volume.
+ - Maximum copies are 3.
type: int
default: 1
policy:
description:
- - Sets the interphysical volume allocation policy.
- - V(maximum) allocates logical partitions across the maximum number of physical volumes.
- - V(minimum) allocates logical partitions across the minimum number of physical volumes.
+ - Sets the interphysical volume allocation policy.
+ - V(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - V(minimum) allocates logical partitions across the minimum number of physical volumes.
type: str
- choices: [ maximum, minimum ]
+ choices: [maximum, minimum]
default: maximum
state:
description:
- - Control if the logical volume exists. If V(present) and the
- volume does not already exist then the O(size) option is required.
+ - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option
+ is required.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
opts:
description:
- - Free-form options to be passed to the mklv command.
+ - Free-form options to be passed to the mklv command.
type: str
default: ''
pvs:
description:
- - A list of physical volumes, for example V(hdisk1,hdisk2).
+ - A list of physical volumes, for example V(hdisk1,hdisk2).
type: list
elements: str
default: []
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a logical volume of 512M
community.general.aix_lvol:
vg: testvg
@@ -90,7 +89,7 @@ EXAMPLES = r'''
vg: testvg
lv: test2lv
size: 512M
- pvs: [ hdisk1, hdisk2 ]
+ pvs: [hdisk1, hdisk2]
- name: Create a logical volume of 512M mirrored
community.general.aix_lvol:
@@ -124,15 +123,15 @@ EXAMPLES = r'''
vg: testvg
lv: testlv
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
msg:
type: str
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
-'''
+"""
import re
diff --git a/plugins/modules/alerta_customer.py b/plugins/modules/alerta_customer.py
index 5e1a5f86c4..fc5ce32d5c 100644
--- a/plugins/modules/alerta_customer.py
+++ b/plugins/modules/alerta_customer.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: alerta_customer
short_description: Manage customers in Alerta
version_added: 4.8.0
@@ -18,7 +17,7 @@ description:
author: Christian Wollinger (@cwollinger)
seealso:
- name: API documentation
- description: Documentation for Alerta API
+ description: Documentation for Alerta API.
link: https://docs.alerta.io/api/reference.html#customers
extends_documentation_fragment:
- community.general.attributes
@@ -60,11 +59,11 @@ options:
- Whether the customer should exist or not.
- Both O(customer) and O(match) identify a customer that should be added or removed.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create customer
community.general.alerta_customer:
alerta_url: https://alerta.example.com
@@ -83,7 +82,7 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
msg:
description:
- Success or failure message.
diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py
index 087dc64b6d..050794d55c 100644
--- a/plugins/modules/ali_instance.py
+++ b/plugins/modules/ali_instance.py
@@ -24,243 +24,238 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ali_instance
-short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security Group
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security
+ Group
description:
- - Create, start, stop, restart, modify or terminate ecs instances.
- - Add or remove ecs instances to/from security group.
+ - Create, start, stop, restart, modify or terminate ECS instances.
+ - Add or remove ECS instances to/from security group.
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - The state of the instance after operating.
- default: 'present'
- choices: ['present', 'running', 'stopped', 'restarted', 'absent']
- type: str
- availability_zone:
- description:
- - Aliyun availability zone ID in which to launch the instance.
- If it is not specified, it will be allocated by system automatically.
- aliases: ['alicloud_zone', 'zone_id']
- type: str
- image_id:
- description:
- - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances.
- aliases: ['image']
- type: str
- instance_type:
- description:
- - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances.
- aliases: ['type']
- type: str
- security_groups:
- description:
- - A list of security group IDs.
- aliases: ['group_ids']
- type: list
- elements: str
- vswitch_id:
- description:
- - The subnet ID in which to launch the instances (VPC).
- aliases: ['subnet_id']
- type: str
- instance_name:
- description:
- - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
- uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
- It cannot begin with http:// or https://.
- aliases: ['name']
- type: str
+ state:
description:
- description:
- - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
- type: str
- internet_charge_type:
- description:
- - Internet charge type of ECS instance.
- default: 'PayByBandwidth'
- choices: ['PayByBandwidth', 'PayByTraffic']
- type: str
- max_bandwidth_in:
- description:
- - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
- default: 200
- type: int
- max_bandwidth_out:
- description:
- - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
- Required when O(allocate_public_ip=true). Ignored when O(allocate_public_ip=false).
- default: 0
- type: int
- host_name:
- description:
- - Instance host name. Ordered hostname is not supported.
- type: str
- unique_suffix:
- description:
- - Specifies whether to add sequential suffixes to the host_name.
- The sequential suffix ranges from 001 to 999.
- default: false
- type: bool
- version_added: '0.2.0'
- password:
- description:
- - The password to login instance. After rebooting instances, modified password will take effect.
- type: str
- system_disk_category:
- description:
- - Category of the system disk.
- default: 'cloud_efficiency'
- choices: ['cloud_efficiency', 'cloud_ssd']
- type: str
- system_disk_size:
- description:
- - Size of the system disk, in GB. The valid values are 40~500.
- default: 40
- type: int
- system_disk_name:
- description:
- - Name of the system disk.
- type: str
- system_disk_description:
- description:
- - Description of the system disk.
- type: str
- count:
- description:
- - The number of the new instance. An integer value which indicates how many instances that match O(count_tag)
- should be running. Instances are either created or terminated based on this value.
- default: 1
- type: int
- count_tag:
- description:
- - O(count) determines how many instances based on a specific tag criteria should be present.
- This can be expressed in multiple ways and is shown in the EXAMPLES section.
- The specified count_tag must already exist or be passed in as the O(tags) option.
- If it is not specified, it will be replaced by O(instance_name).
- type: str
- allocate_public_ip:
- description:
- - Whether allocate a public ip for the new instance.
- default: false
- aliases: [ 'assign_public_ip' ]
- type: bool
- instance_charge_type:
- description:
- - The charge type of the instance.
- choices: ['PrePaid', 'PostPaid']
- default: 'PostPaid'
- type: str
- period:
- description:
- - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid).
- - The valid value are [1-9, 12, 24, 36].
- default: 1
- type: int
- auto_renew:
- description:
- - Whether automate renew the charge of the instance.
- type: bool
- default: false
- auto_renew_period:
- description:
- - The duration of the automatic renew the charge of the instance. Required when O(auto_renew=true).
- choices: [1, 2, 3, 6, 12]
- type: int
- instance_ids:
- description:
- - A list of instance ids. It is required when need to operate existing instances.
- If it is specified, O(count) will lose efficacy.
- type: list
- elements: str
- force:
- description:
- - Whether the current operation needs to be execute forcibly.
- default: false
- type: bool
- tags:
- description:
- - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. V({"key":"value"})
- aliases: ["instance_tags"]
- type: dict
- version_added: '0.2.0'
- purge_tags:
- description:
- - Delete any tags not specified in the task that are on the instance.
- If True, it means you have to specify all the desired tags on each task affecting an instance.
- default: false
- type: bool
- version_added: '0.2.0'
- key_name:
- description:
- - The name of key pair which is used to access ECS instance in SSH.
- required: false
- type: str
- aliases: ['keypair']
- user_data:
- description:
- - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
- It only will take effect when launching the new ECS instances.
- required: false
- type: str
- ram_role_name:
- description:
- - The name of the instance RAM role.
- type: str
- version_added: '0.2.0'
- spot_price_limit:
- description:
- - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
- places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
- type: float
- version_added: '0.2.0'
- spot_strategy:
- description:
- - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
- choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
- default: 'NoSpot'
- type: str
- version_added: '0.2.0'
- period_unit:
- description:
- - The duration unit that you will buy the resource. It is valid when O(instance_charge_type=PrePaid).
- choices: ['Month', 'Week']
- default: 'Month'
- type: str
- version_added: '0.2.0'
- dry_run:
- description:
- - Specifies whether to send a dry-run request.
- - If O(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the
- required parameters are set, and validates the request format, service permissions, and available ECS instances.
- If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
- - If O(dry_run=false), A request is sent. If the validation succeeds, the instance is created.
- default: false
- type: bool
- version_added: '0.2.0'
- include_data_disks:
- description:
- - Whether to change instance disks charge type when changing instance charge type.
- default: true
- type: bool
- version_added: '0.2.0'
+ - The state of the instance after operating.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an uppercase/lowercase
+ letter or a Chinese character and can contain numerals, V(.), V(_) or V(-). It cannot begin with V(http://) or V(https://).
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with V(http://) or V(https://).
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). Required when O(allocate_public_ip=true).
+ Ignored when O(allocate_public_ip=false).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name. The sequential suffix ranges from 001 to 999.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+ - The password to login instance. After rebooting instances, modified password is effective.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+ - Size of the system disk, in GB. The valid values are V(40)~V(500).
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+ - The number of the new instance. An integer value which indicates how many instances that match O(count_tag) should
+ be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+ - O(count) determines how many instances based on a specific tag criteria should be present. This can be expressed in
+ multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in as
+ the O(tags) option. If it is not specified, it is replaced by O(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+ - Whether to allocate a public IP for the new instance.
+ default: false
+ aliases: ['assign_public_ip']
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+ - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid).
+ - The valid values are [V(1-9), V(12), V(24), V(36)].
+ default: 1
+ type: int
+ auto_renew:
+ description:
+ - Whether to automatically renew the charge of the instance.
+ type: bool
+ default: false
+ auto_renew_period:
+ description:
+ - The duration of the automatic renew the charge of the instance. Required when O(auto_renew=true).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+ - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) is ignored.
+ type: list
+ elements: str
+ force:
+ description:
+ - Whether the current operation needs to be executed forcibly.
+ default: false
+ type: bool
+ tags:
+ description:
+ - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. V({"key":"value"}).
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance. If V(true), it means you have to specify all the
+ desired tags on each task affecting an instance.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+ - The name of key pair which is used to access ECS instance in SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It
+ only takes effect when launching the new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal places and
+ takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when O(instance_charge_type=PostPaid).
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+ - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+ - If O(dry_run=true), only a dry-run request is sent and no instance is created. The system checks whether the required
+ parameters are set, and validates the request format, service permissions, and available ECS instances. If the validation
+ fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+ - If O(dry_run=false), a request is sent. If the validation succeeds, the instance is created.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+ - Whether to change instance disks charge type when changing instance charge type.
+ default: true
+ type: bool
+ version_added: '0.2.0'
author:
- - "He Guimin (@xiaozhu36)"
+ - "He Guimin (@xiaozhu36)"
requirements:
- - "Python >= 3.6"
- - "footmark >= 1.19.0"
+ - "Python >= 3.6"
+ - "footmark >= 1.19.0"
extends_documentation_fragment:
- - community.general.alicloud
- - community.general.attributes
-'''
+ - community.general.alicloud
+ - community.general.attributes
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# basic provisioning example vpc network
- name: Basic provisioning example
hosts: localhost
@@ -298,7 +293,7 @@ EXAMPLES = '''
internet_charge_type: '{{ internet_charge_type }}'
max_bandwidth_out: '{{ max_bandwidth_out }}'
tags:
- Name: created_one
+ Name: created_one
host_name: '{{ host_name }}'
password: '{{ password }}'
@@ -316,11 +311,11 @@ EXAMPLES = '''
internet_charge_type: '{{ internet_charge_type }}'
max_bandwidth_out: '{{ max_bandwidth_out }}'
tags:
- Name: created_one
- Version: 0.1
+ Name: created_one
+ Version: 0.1
count: 2
count_tag:
- Name: created_one
+ Name: created_one
host_name: '{{ host_name }}'
password: '{{ password }}'
@@ -348,278 +343,278 @@ EXAMPLES = '''
alicloud_region: '{{ alicloud_region }}'
instance_ids: '{{ instance_ids }}'
security_groups: '{{ security_groups }}'
-'''
+"""
-RETURN = '''
+RETURN = r"""
instances:
- description: List of ECS instances
- returned: always
- type: complex
- contains:
- availability_zone:
- description: The availability zone of the instance is in.
- returned: always
- type: str
- sample: cn-beijing-a
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/xvda).
- returned: always
- type: str
- sample: /dev/xvda
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2018-06-25T04:08:26Z"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: in_use
- volume_id:
- description: The ID of the cloud disk.
- returned: always
- type: str
- sample: d-2zei53pjsi117y6gf9t6
- cpu:
- description: The CPU core count of the instance.
- returned: always
- type: int
- sample: 4
- creation_time:
- description: The time the instance was created.
- returned: always
- type: str
- sample: "2018-06-25T04:08Z"
- description:
- description: The instance description.
- returned: always
- type: str
- sample: "my ansible instance"
- eip:
- description: The attribution of EIP associated with the instance.
- returned: always
- type: complex
- contains:
- allocation_id:
- description: The ID of the EIP.
- returned: always
- type: str
- sample: eip-12345
- internet_charge_type:
- description: The internet charge type of the EIP.
- returned: always
- type: str
- sample: "paybybandwidth"
- ip_address:
- description: EIP address.
- returned: always
- type: str
- sample: 42.10.2.2
- expired_time:
- description: The time the instance will expire.
- returned: always
- type: str
- sample: "2099-12-31T15:59Z"
- gpu:
- description: The attribution of instance GPU.
- returned: always
- type: complex
- contains:
- amount:
- description: The count of the GPU.
- returned: always
- type: int
- sample: 0
- spec:
- description: The specification of the GPU.
- returned: always
- type: str
- sample: ""
- host_name:
- description: The host name of the instance.
- returned: always
- type: str
- sample: iZ2zewaoZ
- id:
- description: Alias of instance_id.
- returned: always
- type: str
- sample: i-abc12345
- instance_id:
- description: ECS instance resource ID.
- returned: always
- type: str
- sample: i-abc12345
- image_id:
- description: The ID of the image used to launch the instance.
- returned: always
- type: str
- sample: m-0011223344
- inner_ip_address:
- description: The inner IPv4 address of the classic instance.
- returned: always
- type: str
- sample: 10.0.0.2
- instance_charge_type:
- description: The instance charge type.
- returned: always
- type: str
- sample: PostPaid
- instance_name:
- description: The name of the instance.
- returned: always
- type: str
- sample: my-ecs
- instance_type:
- description: The instance type of the running instance.
- returned: always
- type: str
- sample: ecs.sn1ne.xlarge
- instance_type_family:
- description: The instance type family of the instance belongs.
- returned: always
- type: str
- sample: ecs.sn1ne
- internet_charge_type:
- description: The billing method of the network bandwidth.
- returned: always
- type: str
- sample: PayByBandwidth
- internet_max_bandwidth_in:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 200
- internet_max_bandwidth_out:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 20
- io_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- memory:
- description: Memory size of the instance.
- returned: always
- type: int
- sample: 8192
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- primary_ip_address:
- description: The primary IPv4 address of the network interface within the vswitch.
- returned: always
- type: str
- sample: 10.0.0.1
- osname:
- description: The operation system name of the instance owned.
- returned: always
- type: str
- sample: CentOS
- ostype:
- description: The operation system type of the instance owned.
- returned: always
- type: str
- sample: linux
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- public_ip_address:
- description: The public IPv4 address assigned to the instance or eip address
- returned: always
- type: str
- sample: 43.0.0.1
- resource_group_id:
- description: The id of the resource group to which the instance belongs.
- returned: always
- type: str
- sample: my-ecs-group
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
- status:
- description: The current status of the instance.
- returned: always
- type: str
- sample: running
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- user_data:
- description: User-defined data.
- returned: always
- type: dict
- sample:
- vswitch_id:
- description: The ID of the vswitch in which the instance is running.
- returned: always
- type: str
- sample: vsw-dew00abcdef
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
- type: str
- sample: vpc-0011223344
- spot_price_limit:
- description:
- - The maximum hourly price for the preemptible instance.
- returned: always
- type: float
- sample: 0.97
- spot_strategy:
- description:
- - The bidding mode of the pay-as-you-go instance.
+ description: List of ECS instances.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone of the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
returned: always
type: str
- sample: NoSpot
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance expires.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+ description: The instance type family of the instance belongs.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+        description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operation system name of the instance owned.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operation system type of the instance owned.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance or eip address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The ID of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
ids:
- description: List of ECS instance IDs
- returned: always
- type: list
- sample: [i-12345er, i-3245fs]
-'''
+ description: List of ECS instance IDs.
+ returned: always
+ type: list
+ sample: ["i-12345er", "i-3245fs"]
+"""
import re
import time
diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py
index d6a7873742..7be5b8cda6 100644
--- a/plugins/modules/ali_instance_info.py
+++ b/plugins/modules/ali_instance_info.py
@@ -24,51 +24,48 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ali_instance_info
short_description: Gather information on instances of Alibaba Cloud ECS
description:
- - This module fetches data from the Open API in Alicloud.
- The module must be called from within the ECS instance itself.
-
+ - This module fetches data from the Open API in Alicloud. The module must be called from within the ECS instance itself.
attributes:
- check_mode:
- version_added: 3.3.0
+ check_mode:
+ version_added: 3.3.0
# This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name_prefix:
- description:
- - Use a instance name prefix to filter ecs instances.
- type: str
- version_added: '0.2.0'
- tags:
- description:
- - A hash/dictionaries of instance tags. C({"key":"value"})
- aliases: ["instance_tags"]
- type: dict
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
- all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
- Filter keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to
- connect different words in one parameter. C(InstanceIds) should be a list.
- C(Tag.n.Key) and C(Tag.n.Value) should be a dict and using O(tags) instead.
- type: dict
- version_added: '0.2.0'
+ name_prefix:
+ description:
+      - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+      - A hash/dictionary of instance tags. C({"key":"value"}).
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be all
+ of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. Filter
+ keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to connect
+ different words in one parameter. C(InstanceIds) should be a list. C(Tag.n.Key) and C(Tag.n.Value) should be a dict
+ and using O(tags) instead.
+ type: dict
+ version_added: '0.2.0'
author:
- - "He Guimin (@xiaozhu36)"
+ - "He Guimin (@xiaozhu36)"
requirements:
- - "Python >= 3.6"
- - "footmark >= 1.13.0"
+ - "Python >= 3.6"
+ - "footmark >= 1.13.0"
extends_documentation_fragment:
- - community.general.alicloud
- - community.general.attributes
- - community.general.attributes.info_module
-'''
+ - community.general.alicloud
+ - community.general.attributes
+ - community.general.attributes.info_module
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Fetch instances details according to setting different filters
- name: Find all instances in the specified region
@@ -91,261 +88,261 @@ EXAMPLES = '''
community.general.ali_instance_info:
tags:
Test: "add"
-'''
+"""
-RETURN = '''
+RETURN = r"""
instances:
- description: List of ECS instances
- returned: always
- type: complex
- contains:
- availability_zone:
- description: The availability zone of the instance is in.
- returned: always
- type: str
- sample: cn-beijing-a
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/xvda).
- returned: always
- type: str
- sample: /dev/xvda
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2018-06-25T04:08:26Z"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: in_use
- volume_id:
- description: The ID of the cloud disk.
- returned: always
- type: str
- sample: d-2zei53pjsi117y6gf9t6
- cpu:
- description: The CPU core count of the instance.
- returned: always
- type: int
- sample: 4
- creation_time:
- description: The time the instance was created.
- returned: always
- type: str
- sample: "2018-06-25T04:08Z"
- description:
- description: The instance description.
- returned: always
- type: str
- sample: "my ansible instance"
- eip:
- description: The attribution of EIP associated with the instance.
- returned: always
- type: complex
- contains:
- allocation_id:
- description: The ID of the EIP.
- returned: always
- type: str
- sample: eip-12345
- internet_charge_type:
- description: The internet charge type of the EIP.
- returned: always
- type: str
- sample: "paybybandwidth"
- ip_address:
- description: EIP address.
- returned: always
- type: str
- sample: 42.10.2.2
- expired_time:
- description: The time the instance will expire.
- returned: always
- type: str
- sample: "2099-12-31T15:59Z"
- gpu:
- description: The attribution of instance GPU.
- returned: always
- type: complex
- contains:
- amount:
- description: The count of the GPU.
- returned: always
- type: int
- sample: 0
- spec:
- description: The specification of the GPU.
- returned: always
- type: str
- sample: ""
- host_name:
- description: The host name of the instance.
- returned: always
- type: str
- sample: iZ2zewaoZ
- id:
- description: Alias of instance_id.
- returned: always
- type: str
- sample: i-abc12345
- instance_id:
- description: ECS instance resource ID.
- returned: always
- type: str
- sample: i-abc12345
- image_id:
- description: The ID of the image used to launch the instance.
- returned: always
- type: str
- sample: m-0011223344
- inner_ip_address:
- description: The inner IPv4 address of the classic instance.
- returned: always
- type: str
- sample: 10.0.0.2
- instance_charge_type:
- description: The instance charge type.
- returned: always
- type: str
- sample: PostPaid
- instance_name:
- description: The name of the instance.
- returned: always
- type: str
- sample: my-ecs
- instance_type_family:
- description: The instance type family of the instance belongs.
- returned: always
- type: str
- sample: ecs.sn1ne
- instance_type:
- description: The instance type of the running instance.
- returned: always
- type: str
- sample: ecs.sn1ne.xlarge
- internet_charge_type:
- description: The billing method of the network bandwidth.
- returned: always
- type: str
- sample: PayByBandwidth
- internet_max_bandwidth_in:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 200
- internet_max_bandwidth_out:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 20
- io_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- memory:
- description: Memory size of the instance.
- returned: always
- type: int
- sample: 8192
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- primary_ip_address:
- description: The primary IPv4 address of the network interface within the vswitch.
- returned: always
- type: str
- sample: 10.0.0.1
- osname:
- description: The operation system name of the instance owned.
- returned: always
- type: str
- sample: CentOS
- ostype:
- description: The operation system type of the instance owned.
- returned: always
- type: str
- sample: linux
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- public_ip_address:
- description: The public IPv4 address assigned to the instance or eip address
- returned: always
- type: str
- sample: 43.0.0.1
- resource_group_id:
- description: The id of the resource group to which the instance belongs.
- returned: always
- type: str
- sample: my-ecs-group
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
+ description: List of ECS instances.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone of the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
status:
- description: The current status of the instance.
- returned: always
- type: str
- sample: running
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- vswitch_id:
- description: The ID of the vswitch in which the instance is running.
- returned: always
- type: str
- sample: vsw-dew00abcdef
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
- type: str
- sample: vpc-0011223344
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance expires.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+ description: The instance type family of the instance belongs.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+        description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operation system name of the instance owned.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operation system type of the instance owned.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance or EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The ID of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
ids:
- description: List of ECS instance IDs
- returned: always
- type: list
- sample: [i-12345er, i-3245fs]
-'''
+ description: List of ECS instance IDs.
+ returned: always
+ type: list
+ sample: ["i-12345er", "i-3245fs"]
+"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py
index da578276fa..c96aede225 100644
--- a/plugins/modules/alternatives.py
+++ b/plugins/modules/alternatives.py
@@ -11,19 +11,18 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: alternatives
short_description: Manages alternative programs for common commands
description:
- - Manages symbolic links using the 'update-alternatives' tool.
- - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+ - Manages symbolic links using the C(update-alternatives) tool.
+ - Useful when multiple programs are installed but provide similar functionality (for example, different editors).
author:
- - Marius Rieder (@jiuka)
- - David Wittman (@DavidWittman)
- - Gabe Mulley (@mulby)
+ - Marius Rieder (@jiuka)
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -39,12 +38,16 @@ options:
description:
- The path to the real executable that the link should point to.
type: path
- required: true
+ family:
+ description:
+ - The family groups similar alternatives. This option is available only on RHEL-based distributions.
+ type: str
+ version_added: 10.1.0
link:
description:
- The path to the symbolic link that should point to the real executable.
- - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
- required when the alternative O(name) is unknown to the system.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is required
+ when the alternative O(name) is unknown to the system.
type: path
priority:
description:
@@ -52,14 +55,14 @@ options:
type: int
state:
description:
- - V(present) - install the alternative (if not already installed), but do
- not set it as the currently selected alternative for the group.
- - V(selected) - install the alternative (if not already installed), and
- set it as the currently selected alternative for the group.
- - V(auto) - install the alternative (if not already installed), and
- set the group to auto mode. Added in community.general 5.1.0.
+ - V(present) - install the alternative (if not already installed), but do not set it as the currently selected alternative
+ for the group.
+ - V(selected) - install the alternative (if not already installed), and set it as the currently selected alternative
+ for the group.
+ - V(auto) - install the alternative (if not already installed), and set the group to auto mode. Added in community.general
+ 5.1.0.
- V(absent) - removes the alternative. Added in community.general 5.1.0.
- choices: [ present, selected, auto, absent ]
+ choices: [present, selected, auto, absent]
default: selected
type: str
version_added: 4.8.0
@@ -67,8 +70,7 @@ options:
description:
- A list of subcommands.
- Each subcommand needs a name, a link and a path parameter.
- - Subcommands are also named 'slaves' or 'followers', depending on the version
- of alternatives.
+ - Subcommands are also named C(slaves) or C(followers), depending on the version of C(alternatives).
type: list
elements: dict
aliases: ['slaves']
@@ -89,15 +91,21 @@ options:
type: path
required: true
version_added: 5.1.0
-requirements: [ update-alternatives ]
-'''
+requirements: [update-alternatives]
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Correct java version selected
community.general.alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+- name: Select java-11-openjdk.x86_64 family
+ community.general.alternatives:
+ name: java
+ family: java-11-openjdk.x86_64
+ when: ansible_os_family == 'RedHat'
+
- name: Alternatives link created
community.general.alternatives:
name: hadoop-conf
@@ -133,7 +141,7 @@ EXAMPLES = r'''
- name: keytool
link: /usr/bin/keytool
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool
-'''
+"""
import os
import re
@@ -182,17 +190,25 @@ class AlternativesModule(object):
subcommands_parameter = self.module.params['subcommands']
priority_parameter = self.module.params['priority']
if (
- self.path not in self.current_alternatives or
- (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
- (subcommands_parameter is not None and (
- not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
- not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
- ))
+ self.path is not None and (
+ self.path not in self.current_alternatives or
+ (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
+ (subcommands_parameter is not None and (
+ not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
+ not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
+ ))
+ )
):
self.install()
# Check if we need to set the preference
- if self.mode_selected and self.current_path != self.path:
+ is_same_path = self.path is not None and self.current_path == self.path
+ is_same_family = False
+ if self.current_path is not None and self.current_path in self.current_alternatives:
+ current_alternative = self.current_alternatives[self.current_path]
+ is_same_family = current_alternative.get('family') == self.family
+
+ if self.mode_selected and not (is_same_path or is_same_family):
self.set()
# Check if we need to reset to auto
@@ -213,6 +229,8 @@ class AlternativesModule(object):
self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link')
cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
+ if self.family is not None:
+ cmd.extend(["--family", self.family])
if self.module.params['subcommands'] is not None:
subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
@@ -228,6 +246,7 @@ class AlternativesModule(object):
self.result['diff']['after'] = dict(
state=AlternativeState.PRESENT,
path=self.path,
+ family=self.family,
priority=self.priority,
link=self.link,
)
@@ -248,9 +267,15 @@ class AlternativesModule(object):
self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)
def set(self):
- cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path]
+ # Path takes precedence over family as it is more specific
+ if self.path is None:
+ arg = self.family
+ else:
+ arg = self.path
+
+ cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg]
self.result['changed'] = True
- self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name))
+ self.messages.append("Set alternative '%s' for '%s'." % (arg, self.name))
if not self.module.check_mode:
self.module.run_command(cmd, check_rc=True)
@@ -277,6 +302,10 @@ class AlternativesModule(object):
def path(self):
return self.module.params.get('path')
+ @property
+ def family(self):
+ return self.module.params.get('family')
+
@property
def link(self):
return self.module.params.get('link') or self.current_link
@@ -321,7 +350,7 @@ class AlternativesModule(object):
current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)
- alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)
match = current_mode_regex.search(display_output)
@@ -346,9 +375,10 @@ class AlternativesModule(object):
if not subcmd_path_map and self.subcommands:
subcmd_path_map = {s['name']: s['link'] for s in self.subcommands}
- for path, prio, subcmd in alternative_regex.findall(display_output):
+ for path, family, prio, subcmd in alternative_regex.findall(display_output):
self.current_alternatives[path] = dict(
priority=int(prio),
+ family=family,
subcommands=[dict(
name=name,
path=spath,
@@ -383,7 +413,8 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
- path=dict(type='path', required=True),
+ path=dict(type='path'),
+ family=dict(type='str'),
link=dict(type='path'),
priority=dict(type='int'),
state=dict(
@@ -398,6 +429,7 @@ def main():
)),
),
supports_check_mode=True,
+ required_one_of=[('path', 'family')]
)
AlternativesModule(module)
diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py
new file mode 100644
index 0000000000..a9bffa50ea
--- /dev/null
+++ b/plugins/modules/android_sdk.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Stanislav Shamilov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: android_sdk
+short_description: Manages Android SDK packages
+description:
+ - Manages Android SDK packages.
+ - Allows installation from different channels (stable, beta, dev, canary).
+ - Allows installation of packages to a non-default SDK root directory.
+author: Stanislav Shamilov (@shamilovstas)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+version_added: 10.2.0
+options:
+ accept_licenses:
+ description:
+ - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package
+ installation. Otherwise, every license prompt is rejected.
+ type: bool
+ default: false
+ name:
+ description:
+ - A name of an Android SDK package (for instance, V(build-tools;34.0.0)).
+ aliases: ['package', 'pkg']
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - V(present) ensures that package(s) is/are present.
+ - V(absent) ensures that package(s) is/are absent.
+ - V(latest) ensures that package(s) is/are installed and updated to the latest version(s).
+ choices: ['present', 'absent', 'latest']
+ default: present
+ type: str
+ sdk_root:
+ description:
+ - Provides path for an alternative directory to install Android SDK packages to. By default, all packages are installed
+ to the directory where C(sdkmanager) is installed.
+ type: path
+ channel:
+ description:
+ - Indicates what channel must C(sdkmanager) use for installation of packages.
+ choices: ['stable', 'beta', 'dev', 'canary']
+ default: stable
+ type: str
+requirements:
+ - C(java) >= 17
+ - C(sdkmanager) Command line tool for installing Android SDK packages.
+notes:
+ - For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through command
+ line prompt in a form of a Y/N question when a licensed package is requested to be installed. If there are several packages
+ requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these
+ licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently,
+ it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a
+ single invocation of the C(sdkmanager --install) command, it is done by executing the command independently for each package.
+ This makes sure that at most only one license prompt needs to be answered. At the time of writing this module, a C(sdkmanager)'s
+ package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the
+ module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer.
+ If this becomes the case, file an issue and in the meantime, consider accepting all the licenses in advance, as it is
+ described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance,
+ using the M(ansible.builtin.command) module.
+seealso:
+ - name: sdkmanager tool documentation
+ description: Detailed information of how to install and use sdkmanager command line tool.
+ link: https://developer.android.com/tools/sdkmanager
+"""
+
+EXAMPLES = r"""
+- name: Install build-tools;34.0.0
+ community.general.android_sdk:
+ name: build-tools;34.0.0
+ accept_licenses: true
+ state: present
+
+- name: Install build-tools;34.0.0 and platform-tools
+ community.general.android_sdk:
+ name:
+ - build-tools;34.0.0
+ - platform-tools
+ accept_licenses: true
+ state: present
+
+- name: Delete build-tools;34.0.0
+ community.general.android_sdk:
+ name: build-tools;34.0.0
+ state: absent
+
+- name: Install platform-tools or update if installed
+ community.general.android_sdk:
+ name: platform-tools
+ accept_licenses: true
+ state: latest
+
+- name: Install build-tools;34.0.0 to a different SDK root
+ community.general.android_sdk:
+ name: build-tools;34.0.0
+ accept_licenses: true
+ state: present
+ sdk_root: "/path/to/new/root"
+
+- name: Install a package from another channel
+ community.general.android_sdk:
+ name: some-package-present-in-canary-channel
+ accept_licenses: true
+ state: present
+ channel: canary
+"""
+
+RETURN = r"""
+installed:
+ description: A list of packages that have been installed.
+ returned: when packages have changed
+ type: list
+ sample: ["build-tools;34.0.0", "platform-tools"]
+
+removed:
+ description: A list of packages that have been removed.
+ returned: when packages have changed
+ type: list
+ sample: ["build-tools;34.0.0", "platform-tools"]
+"""
+
+from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.android_sdkmanager import Package, AndroidSdkManager
+
+
+class AndroidSdk(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent', 'latest']),
+ package=dict(type='list', elements='str', aliases=['pkg', 'name']),
+ sdk_root=dict(type='path'),
+ channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 'canary']),
+ accept_licenses=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True
+ )
+
+ def __init_module__(self):
+ self.sdkmanager = AndroidSdkManager(self.module)
+ self.vars.set('installed', [], change=True)
+ self.vars.set('removed', [], change=True)
+
+ def _parse_packages(self):
+ arg_pkgs = set(self.vars.package)
+ if len(arg_pkgs) < len(self.vars.package):
+ self.do_raise("Packages may not repeat")
+ return set([Package(p) for p in arg_pkgs])
+
+ def state_present(self):
+ packages = self._parse_packages()
+ installed = self.sdkmanager.get_installed_packages()
+ pending_installation = packages.difference(installed)
+
+ self.vars.installed = AndroidSdk._map_packages_to_names(pending_installation)
+ if not self.check_mode:
+ rc, stdout, stderr = self.sdkmanager.apply_packages_changes(pending_installation, self.vars.accept_licenses)
+ if rc != 0:
+ self.do_raise("Could not install packages: %s" % stderr)
+
+ def state_absent(self):
+ packages = self._parse_packages()
+ installed = self.sdkmanager.get_installed_packages()
+ to_be_deleted = packages.intersection(installed)
+ self.vars.removed = AndroidSdk._map_packages_to_names(to_be_deleted)
+ if not self.check_mode:
+ rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_deleted)
+ if rc != 0:
+ self.do_raise("Could not uninstall packages: %s" % stderr)
+
+ def state_latest(self):
+ packages = self._parse_packages()
+ installed = self.sdkmanager.get_installed_packages()
+ updatable = self.sdkmanager.get_updatable_packages()
+ not_installed = packages.difference(installed)
+ to_be_installed = not_installed.union(updatable)
+ self.vars.installed = AndroidSdk._map_packages_to_names(to_be_installed)
+
+ if not self.check_mode:
+ rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_installed, self.vars.accept_licenses)
+ if rc != 0:
+ self.do_raise("Could not install packages: %s" % stderr)
+
+ @staticmethod
+ def _map_packages_to_names(packages):
+ return [x.name for x in packages]
+
+
+def main():
+ AndroidSdk.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py
index 62de70bb63..4712ca9a3c 100644
--- a/plugins/modules/ansible_galaxy_install.py
+++ b/plugins/modules/ansible_galaxy_install.py
@@ -8,30 +8,27 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: ansible_galaxy_install
author:
-- "Alexei Znamensky (@russoz)"
+ - "Alexei Znamensky (@russoz)"
short_description: Install Ansible roles or collections using ansible-galaxy
version_added: 3.5.0
description:
-- This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
+ - This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
notes:
-- Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0.
-- >
- The module will try and run using the C(C.UTF-8) locale.
- If that fails, it will try C(en_US.UTF-8).
- If that one also fails, the module will fail.
+ - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0.
+ - The module tries to run using the C(C.UTF-8) locale. If that fails, it tries C(en_US.UTF-8). If that one also fails, the
+ module fails.
seealso:
-- name: C(ansible-galaxy) command manual page
- description: Manual page for the command.
- link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html
+ - name: C(ansible-galaxy) command manual page
+ description: Manual page for the command.
+ link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html
requirements:
-- ansible-core 2.11 or newer
+ - ansible-core 2.11 or newer
extends_documentation_fragment:
-- community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -40,63 +37,59 @@ attributes:
options:
state:
description:
- - >
- If O(state=present) then the collection or role will be installed.
- Note that the collections and roles are not updated with this option.
- - >
- Currently the O(state=latest) is ignored unless O(type=collection), and it will
- ensure the collection is installed and updated to the latest available version.
- - Please note that O(force=true) can be used to perform upgrade regardless of O(type).
+ - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated
+ with this option.
+ - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and
+ updated to the latest available version.
+ - Please note that O(force=true) can be used to perform upgrade regardless of O(type).
type: str
choices: [present, latest]
default: present
version_added: 9.1.0
type:
description:
- - The type of installation performed by C(ansible-galaxy).
- - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections.
- - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices."
+ - The type of installation performed by C(ansible-galaxy).
+ - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections.
+ - 'Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three
+ choices.'
type: str
choices: [collection, role, both]
required: true
name:
description:
- - Name of the collection or role being installed.
- - >
- Versions can be specified with C(ansible-galaxy) usual formats.
- For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0).
- - O(name) and O(requirements_file) are mutually exclusive.
+ - Name of the collection or role being installed.
+ - Versions can be specified with C(ansible-galaxy) usual formats. For example, the collection V(community.docker:1.6.1)
+ or the role V(ansistrano.deploy,3.8.0).
+ - O(name) and O(requirements_file) are mutually exclusive.
type: str
requirements_file:
description:
- - Path to a file containing a list of requirements to be installed.
- - It works for O(type) equals to V(collection) and V(role).
- - O(name) and O(requirements_file) are mutually exclusive.
+ - Path to a file containing a list of requirements to be installed.
+ - It works for O(type) equals to V(collection) and V(role).
+ - O(name) and O(requirements_file) are mutually exclusive.
type: path
dest:
description:
- - The path to the directory containing your collections or roles, according to the value of O(type).
- - >
- Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file)
- contains both roles and collections and O(dest) is specified.
+ - The path to the directory containing your collections or roles, according to the value of O(type).
+ - Please notice that C(ansible-galaxy) does not install collections with O(type=both), when O(requirements_file) contains
+ both roles and collections and O(dest) is specified.
type: path
no_deps:
description:
- - Refrain from installing dependencies.
+ - Refrain from installing dependencies.
version_added: 4.5.0
type: bool
default: false
force:
description:
- - Force overwriting existing roles and/or collections.
- - It can be used for upgrading, but the module output will always report C(changed=true).
- - Using O(force=true) is mandatory when downgrading.
+ - Force overwriting existing roles and/or collections.
+ - It can be used for upgrading, but the module output always reports C(changed=true).
+ - Using O(force=true) is mandatory when downgrading.
type: bool
default: false
"""
-EXAMPLES = """
----
+EXAMPLES = r"""
- name: Install collection community.network
community.general.ansible_galaxy_install:
type: collection
@@ -120,8 +113,7 @@ EXAMPLES = """
force: true
"""
-RETURN = """
----
+RETURN = r"""
type:
description: The value of the O(type) parameter.
type: str
@@ -144,8 +136,8 @@ force:
returned: always
installed_roles:
description:
- - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
- - If O(name) is specified, returns that role name and the version installed per path.
+ - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
+ - If O(name) is specified, returns that role name and the version installed per path.
type: dict
returned: always when installing roles
contains:
@@ -160,13 +152,13 @@ installed_roles:
ansistrano.deploy: 3.8.0
installed_collections:
description:
- - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
- - If O(name) is specified, returns that collection name and the version installed per path.
+ - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
+ - If O(name) is specified, returns that collection name and the version installed per path.
type: dict
returned: always when installing collections
contains:
"":
- description: Collections and versions for that path
+ description: Collections and versions for that path.
type: dict
sample:
/home/az/.ansible/collections/ansible_collections:
@@ -188,6 +180,12 @@ new_roles:
sample:
ansistrano.deploy: 3.8.0
baztian.xfce: v0.0.3
+version:
+ description: Version of ansible-core for ansible-galaxy.
+ type: str
+ returned: always
+ sample: 2.17.4
+ version_added: 10.0.0
"""
import re
@@ -222,7 +220,6 @@ class AnsibleGalaxyInstall(ModuleHelper):
required_if=[('type', 'both', ['requirements_file'])],
supports_check_mode=False,
)
- use_old_vardict = False
command = 'ansible-galaxy'
command_args_formats = dict(
@@ -252,7 +249,6 @@ class AnsibleGalaxyInstall(ModuleHelper):
if not match:
self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line))
version = match.group("version")
- version = tuple(int(x) for x in version.split('.')[:3])
return version
try:
@@ -265,7 +261,8 @@ class AnsibleGalaxyInstall(ModuleHelper):
return runner, ctx.run()
def __init_module__(self):
- self.runner, self.ansible_version = self._get_ansible_galaxy_version()
+ self.runner, self.vars.version = self._get_ansible_galaxy_version()
+ self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3])
if self.ansible_version < (2, 11):
self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.")
self.vars.set("new_collections", {}, change=True)
diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py
index b112f6f582..3816845257 100644
--- a/plugins/modules/apache2_mod_proxy.py
+++ b/plugins/modules/apache2_mod_proxy.py
@@ -9,16 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: apache2_mod_proxy
author: Olivier Boukili (@oboukili)
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
- pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
- status page has to be enabled and accessible, as this module relies on parsing
- this page.
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool, using HTTP POST and GET requests. The
+ httpd mod_proxy balancer-member status page has to be enabled and accessible, as this module relies on parsing this page.
extends_documentation_fragment:
- community.general.attributes
requirements:
@@ -32,28 +29,29 @@ options:
balancer_url_suffix:
type: str
description:
- - Suffix of the balancer pool url required to access the balancer pool
- status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ - Suffix of the balancer pool URL required to access the balancer pool status page (for example V(balancer_vhost[:port]/balancer_url_suffix)).
default: /balancer-manager/
balancer_vhost:
type: str
description:
- - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ - (IPv4|IPv6|FQDN):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
type: str
description:
- - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
- Port number is autodetected and should not be specified here.
- If undefined, apache2_mod_proxy module will return a members list of
- dictionaries of all the current balancer pool members' attributes.
+ - (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. Port number is autodetected and should not
+ be specified here.
+ - If undefined, the M(community.general.apache2_mod_proxy) module returns a members list of dictionaries of all the
+ current balancer pool members' attributes.
state:
- type: str
+ type: list
+ elements: str
+ choices: [present, absent, enabled, disabled, drained, hot_standby, ignore_errors]
description:
- Desired state of the member host.
- (absent|disabled),drained,hot_standby,ignore_errors can be
- simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
- - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ - States can be simultaneously invoked by separating them with a comma (for example V(state=drained,ignore_errors)),
+ but it is recommended to specify them as a proper YAML list.
+ - States V(present) and V(absent) must be used without any other state.
tls:
description:
- Use https to access balancer management page.
@@ -64,9 +62,9 @@ options:
- Validate ssl/tls certificates.
type: bool
default: true
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get all current balancer pool members attributes
community.general.apache2_mod_proxy:
balancer_vhost: 10.0.0.2
@@ -111,57 +109,61 @@ EXAMPLES = '''
member_host: '{{ member.host }}'
state: absent
delegate_to: myloadbalancernode
-'''
+"""
-RETURN = '''
+RETURN = r"""
member:
- description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
- type: dict
- returned: success
- sample:
- {"attributes":
- {"Busy": "0",
- "Elected": "42",
- "Factor": "1",
- "From": "136K",
- "Load": "0",
- "Route": null,
- "RouteRedir": null,
- "Set": "0",
- "Status": "Init Ok ",
- "To": " 47K",
- "Worker URL": null
- },
- "balancer_url": "http://10.10.0.2/balancer-manager/",
- "host": "10.10.0.20",
- "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
- "path": "/ws",
- "port": 8080,
- "protocol": "http",
- "status": {
- "disabled": false,
- "drained": false,
- "hot_standby": false,
- "ignore_errors": false
- }
+ description: Specific balancer member information dictionary, returned when the module is invoked with O(member_host) parameter.
+ type: dict
+ returned: success
+ sample:
+ {
+ "attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
}
+ }
members:
- description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
- returned: success
- type: list
- sample:
- [{"attributes": {
- "Busy": "0",
- "Elected": "42",
- "Factor": "1",
- "From": "136K",
- "Load": "0",
- "Route": null,
- "RouteRedir": null,
- "Set": "0",
- "Status": "Init Ok ",
- "To": " 47K",
- "Worker URL": null
+ description: List of member (defined above) dictionaries, returned when the module is invoked with no O(member_host) and
+ O(state) args.
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
@@ -170,24 +172,25 @@ members:
"port": 8080,
"protocol": "http",
"status": {
- "disabled": false,
- "drained": false,
- "hot_standby": false,
- "ignore_errors": false
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
}
- },
- {"attributes": {
- "Busy": "0",
- "Elected": "42",
- "Factor": "1",
- "From": "136K",
- "Load": "0",
- "Route": null,
- "RouteRedir": null,
- "Set": "0",
- "Status": "Init Ok ",
- "To": " 47K",
- "Worker URL": null
+ },
+ {
+ "attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
@@ -196,38 +199,35 @@ members:
"port": 8080,
"protocol": "http",
"status": {
- "disabled": false,
- "drained": false,
- "hot_standby": false,
- "ignore_errors": false}
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
}
- ]
-'''
+ }
+ ]
+"""
import re
-import traceback
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException
+
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import fetch_url
-from ansible.module_utils.six import iteritems, PY2
+from ansible.module_utils.six import raise_from, PY2
-BEAUTIFUL_SOUP_IMP_ERR = None
-try:
- if PY2:
+if PY2:
+ with deps.declare("BeautifulSoup"):
from BeautifulSoup import BeautifulSoup
- else:
- from bs4 import BeautifulSoup
-except ImportError:
- BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
- HAS_BEAUTIFULSOUP = False
else:
- HAS_BEAUTIFULSOUP = True
+ with deps.declare("beautifulsoup4"):
+ from bs4 import BeautifulSoup
# balancer member attributes extraction regexp:
-EXPRESSION = to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")
+EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"))
# Apache2 server version extraction regexp:
-APACHE_VERSION_EXPRESSION = to_text(r"SERVER VERSION: APACHE/([\d.]+)")
+APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)"))
def find_all(where, what):
@@ -238,10 +238,10 @@ def find_all(where, what):
def regexp_extraction(string, _regexp, groups=1):
""" Returns the capture group (default=1) specified in the regexp, applied to the string """
- regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ regexp_search = _regexp.search(string)
if regexp_search:
if regexp_search.group(groups) != '':
- return str(regexp_search.group(groups))
+ return regexp_search.group(groups)
return None
@@ -262,33 +262,33 @@ class BalancerMember(object):
"""
def __init__(self, management_url, balancer_url, module):
- self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
- self.management_url = str(management_url)
+ self.host = regexp_extraction(management_url, EXPRESSION, 4)
+ self.management_url = management_url
self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
self.port = regexp_extraction(management_url, EXPRESSION, 5)
self.path = regexp_extraction(management_url, EXPRESSION, 6)
- self.balancer_url = str(balancer_url)
+ self.balancer_url = balancer_url
self.module = module
def get_member_attributes(self):
""" Returns a dictionary of a balancer member's attributes."""
- balancer_member_page = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url})
+ resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url})
- if balancer_member_page[1]['status'] != 200:
- self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
- else:
- try:
- soup = BeautifulSoup(balancer_member_page[0])
- except TypeError as exc:
- self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc))
- else:
- subsoup = find_all(find_all(soup, 'table')[1], 'tr')
- keys = find_all(subsoup[0], 'th')
- for valuesset in subsoup[1::1]:
- if re.search(pattern=self.host, string=str(valuesset)):
- values = find_all(valuesset, 'td')
- return {keys[x].string: values[x].string for x in range(0, len(keys))}
+ if info['status'] != 200:
+ raise ModuleHelperException("Could not get balancer_member_page, check for connectivity! {0}".format(info))
+
+ try:
+ soup = BeautifulSoup(resp)
+ except TypeError as exc:
+ raise_from(ModuleHelperException("Cannot parse balancer_member_page HTML! {0}".format(exc)), exc)
+
+ subsoup = find_all(find_all(soup, 'table')[1], 'tr')
+ keys = find_all(subsoup[0], 'th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = find_all(valuesset, 'td')
+ return {keys[x].string: values[x].string for x in range(0, len(keys))}
def get_member_status(self):
""" Returns a dictionary of a balancer member's status attributes."""
@@ -296,8 +296,8 @@ class BalancerMember(object):
'drained': 'Drn',
'hot_standby': 'Stby',
'ignore_errors': 'Ign'}
- actual_status = str(self.attributes['Status'])
- status = {mode: patt in actual_status for mode, patt in iteritems(status_mapping)}
+ actual_status = self.attributes['Status']
+ status = {mode: patt in actual_status for mode, patt in status_mapping.items()}
return status
def set_member_status(self, values):
@@ -308,160 +308,125 @@ class BalancerMember(object):
'ignore_errors': '&w_status_I'}
request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
- values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping))
+ values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in values_mapping.items())
request_body = "{0}{1}".format(request_body, values_url)
- response = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url})
- if response[1]['status'] != 200:
- self.module.fail_json(msg="Could not set the member status! {host} {status}".format(host=self.host, status=response[1]['status']))
+ response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url})
+ if info['status'] != 200:
+ raise ModuleHelperException("Could not set the member status! {0} {1}".format(self.host, info['status']))
attributes = property(get_member_attributes)
status = property(get_member_status, set_member_status)
+ def as_dict(self):
+ return {
+ "host": self.host,
+ "status": self.status,
+ "protocol": self.protocol,
+ "port": self.port,
+ "path": self.path,
+ "attributes": self.attributes,
+ "management_url": self.management_url,
+ "balancer_url": self.balancer_url
+ }
+
class Balancer(object):
""" Apache httpd 2.4 mod_proxy balancer object"""
- def __init__(self, host, suffix, module, members=None, tls=False):
- if tls:
- self.base_url = 'https://' + str(host)
- self.url = 'https://' + str(host) + str(suffix)
- else:
- self.base_url = 'http://' + str(host)
- self.url = 'http://' + str(host) + str(suffix)
+ def __init__(self, module, host, suffix, tls=False):
+ proto = "https" if tls else "http"
+ self.base_url = '{0}://{1}'.format(proto, host)
+ self.url = '{0}://{1}{2}'.format(proto, host, suffix)
self.module = module
self.page = self.fetch_balancer_page()
- if members is None:
- self._members = []
def fetch_balancer_page(self):
""" Returns the balancer management html page as a string for later parsing."""
- page = fetch_url(self.module, str(self.url))
- if page[1]['status'] != 200:
- self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
- else:
- content = to_text(page[0].read())
- apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
- if apache_version:
- if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
- self.module.fail_json(
- msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: {version}".format(
- version=apache_version
- )
- )
- return content
- else:
- self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
+ resp, info = fetch_url(self.module, self.url)
+ if info['status'] != 200:
+ raise ModuleHelperException("Could not get balancer page! HTTP status response: {0}".format(info['status']))
+
+ content = to_text(resp.read())
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if not apache_version:
+ raise ModuleHelperException("Could not get the Apache server version from the balancer-manager")
+
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ raise ModuleHelperException("This module only acts on an Apache2 2.4+ instance, current Apache2 version: {0}".format(apache_version))
+ return content
def get_balancer_members(self):
""" Returns members of the balancer as a generator object for later iteration."""
try:
soup = BeautifulSoup(self.page)
- except TypeError:
- self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
- else:
- elements = find_all(soup, 'a')
- for element in elements[1::1]:
- balancer_member_suffix = str(element.get('href'))
- if not balancer_member_suffix:
- self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
- else:
- yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+ except TypeError as e:
+ raise_from(ModuleHelperException("Cannot parse balancer page HTML! {0}".format(self.page)), e)
+
+ elements = find_all(soup, 'a')
+ for element in elements[1::1]:
+ balancer_member_suffix = element.get('href')
+ if not balancer_member_suffix:
+ raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!")
+
+ yield BalancerMember(self.base_url + balancer_member_suffix, self.url, self.module)
members = property(get_balancer_members)
-def main():
+class ApacheModProxy(ModuleHelper):
""" Initiates module."""
- module = AnsibleModule(
+ module = dict(
argument_spec=dict(
balancer_vhost=dict(required=True, type='str'),
balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
member_host=dict(type='str'),
- state=dict(type='str'),
+ state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']),
tls=dict(default=False, type='bool'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
)
- if HAS_BEAUTIFULSOUP is False:
- module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+ def __init_module__(self):
+ deps.validate(self.module)
- if module.params['state'] is not None:
- states = module.params['state'].split(',')
- if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
- module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state):
+ self.do_raise(msg="states present/enabled are mutually exclusive with other states!")
+
+ self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls)
+
+ def __run__(self):
+ if self.vars.member_host is None:
+ self.vars.members = [member.as_dict() for member in self.mybalancer.members]
else:
- for _state in states:
- if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
- module.fail_json(
- msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
- )
- else:
- states = ['None']
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+ for mode in member_status:
+ for state in self.vars.state or []:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
- mybalancer = Balancer(module.params['balancer_vhost'],
- module.params['balancer_url_suffix'],
- module=module,
- tls=module.params['tls'])
+ for member in self.mybalancer.members:
+ if str(member.host) == self.vars.member_host:
+ member_exists = True
+ if self.vars.state is not None:
+ member_status_before = member.status
+ if not self.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ self.changed |= (member_status_before != member_status_after)
+ self.vars.member = member.as_dict()
- if module.params['member_host'] is None:
- json_output_list = []
- for member in mybalancer.members:
- json_output_list.append({
- "host": member.host,
- "status": member.status,
- "protocol": member.protocol,
- "port": member.port,
- "path": member.path,
- "attributes": member.attributes,
- "management_url": member.management_url,
- "balancer_url": member.balancer_url
- })
- module.exit_json(
- changed=False,
- members=json_output_list
- )
- else:
- changed = False
- member_exists = False
- member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
- for mode in member_status.keys():
- for state in states:
- if mode == state:
- member_status[mode] = True
- elif mode == 'disabled' and state == 'absent':
- member_status[mode] = True
+ if not member_exists:
+ self.do_raise(msg='{0} is not a member of the balancer {1}!'.format(self.vars.member_host, self.vars.balancer_vhost))
- for member in mybalancer.members:
- if str(member.host) == str(module.params['member_host']):
- member_exists = True
- if module.params['state'] is not None:
- member_status_before = member.status
- if not module.check_mode:
- member_status_after = member.status = member_status
- else:
- member_status_after = member_status
- if member_status_before != member_status_after:
- changed = True
- json_output = {
- "host": member.host,
- "status": member.status,
- "protocol": member.protocol,
- "port": member.port,
- "path": member.path,
- "attributes": member.attributes,
- "management_url": member.management_url,
- "balancer_url": member.balancer_url
- }
- if member_exists:
- module.exit_json(
- changed=changed,
- member=json_output
- )
- else:
- module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+def main():
+ ApacheModProxy.execute()
if __name__ == '__main__':
diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py
index cf11dc5014..99db968670 100644
--- a/plugins/modules/apache2_module.py
+++ b/plugins/modules/apache2_module.py
@@ -9,66 +9,64 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: apache2_module
author:
- - Christian Berendt (@berendt)
- - Ralf Hertel (@n0trax)
- - Robin Roth (@robinro)
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
short_description: Enables/disables a module of the Apache2 webserver
description:
- - Enables or disables a specified module of the Apache2 webserver.
+ - Enables or disables a specified module of the Apache2 webserver.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: str
- description:
- - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
- required: true
- identifier:
- type: str
- description:
- - Identifier of the module as listed by C(apache2ctl -M).
- This is optional and usually determined automatically by the common convention of
- appending V(_module) to O(name) as well as custom exception for popular modules.
- required: false
- force:
- description:
- - Force disabling of default modules and override Debian warnings.
- required: false
- type: bool
- default: false
- state:
- type: str
- description:
- - Desired state of the module.
- choices: ['present', 'absent']
- default: present
- ignore_configcheck:
- description:
- - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
- type: bool
- default: false
- warn_mpm_absent:
- description:
- - Control the behavior of the warning process for MPM modules.
- type: bool
- default: true
- version_added: 6.3.0
-requirements: ["a2enmod","a2dismod"]
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod)/C(a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M). This is optional and usually determined automatically by the
+ common convention of appending V(_module) to O(name) as well as custom exceptions for popular modules.
+ required: false
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: false
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+ - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
+ type: bool
+ default: false
+ warn_mpm_absent:
+ description:
+ - Control the behavior of the warning process for MPM modules.
+ type: bool
+ default: true
+ version_added: 6.3.0
+requirements: ["a2enmod", "a2dismod"]
notes:
- - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
- Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not.
-'''
+ - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. Whether it works
+ on others depends on whether the C(a2enmod) and C(a2dismod) tools are available or not.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Enable the Apache2 module wsgi
community.general.apache2_module:
state: present
@@ -98,40 +96,24 @@ EXAMPLES = '''
warn_mpm_absent: false
ignore_configcheck: true
loop:
- - module: mpm_event
- state: absent
- - module: mpm_prefork
- state: present
+ - module: mpm_event
+ state: absent
+ - module: mpm_prefork
+ state: present
- name: Enable dump_io module, which is identified as dumpio_module inside apache2
community.general.apache2_module:
state: present
name: dump_io
identifier: dumpio_module
-'''
+"""
-RETURN = '''
+RETURN = r"""
result:
- description: message about action taken
- returned: always
- type: str
-warnings:
- description: list of warning messages
- returned: when needed
- type: list
-rc:
- description: return code of underlying command
- returned: failed
- type: int
-stdout:
- description: stdout of underlying command
- returned: failed
- type: str
-stderr:
- description: stderr of underlying command
- returned: failed
- type: str
-'''
+ description: Message about action taken.
+ returned: always
+ type: str
+"""
import re
@@ -166,12 +148,12 @@ def _module_is_enabled(module):
if module.params['ignore_configcheck']:
if 'AH00534' in stderr and 'mpm_' in module.params['name']:
if module.params['warn_mpm_absent']:
- module.warnings.append(
+ module.warn(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
- module.warnings.append(error_msg)
+ module.warn(error_msg)
return False
else:
module.fail_json(msg=error_msg)
@@ -226,9 +208,7 @@ def _set_state(module, state):
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)
a2mod_binary_path = module.get_bin_path(a2mod_binary)
if a2mod_binary_path is None:
@@ -243,9 +223,7 @@ def _set_state(module, state):
result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])
if _module_is_enabled(module) == want_enabled:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)
else:
msg = (
'Failed to set module {name} to {state}:\n'
@@ -263,9 +241,7 @@ def _set_state(module, state):
stdout=stdout,
stderr=stderr)
else:
- module.exit_json(changed=False,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=False, result=success_msg)
def main():
@@ -281,8 +257,6 @@ def main():
supports_check_mode=True,
)
- module.warnings = []
-
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py
index 7caefd1357..e70e51a1f0 100644
--- a/plugins/modules/apk.py
+++ b/plugins/modules/apk.py
@@ -12,8 +12,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: apk
short_description: Manages apk packages
description:
@@ -29,15 +28,15 @@ attributes:
options:
available:
description:
- - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
- if the currently installed package is no longer available from any repository.
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead
+ of holding them) if the currently installed package is no longer available from any repository.
type: bool
default: false
name:
description:
- A package name, like V(foo), or multiple packages, like V(foo,bar).
- - Do not include additional whitespace when specifying multiple packages as a string.
- Prefer YAML lists over comma-separating multiple package names.
+ - Do not include additional whitespace when specifying multiple packages as a string. Prefer YAML lists over comma-separating
+ multiple package names.
type: list
elements: str
no_cache:
@@ -48,8 +47,8 @@ options:
version_added: 1.0.0
repository:
description:
- - A package repository or multiple repositories.
- Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system
+ repositories rather than supplementing them.
type: list
elements: str
state:
@@ -59,7 +58,7 @@ options:
- V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias.
- V(latest) ensures the package(s) is/are present and the latest version(s).
default: present
- choices: [ "present", "absent", "latest", "installed", "removed" ]
+ choices: ["present", "absent", "latest", "installed", "removed"]
type: str
update_cache:
description:
@@ -73,17 +72,18 @@ options:
default: false
world:
description:
- - Use a custom world file when checking for explicitly installed packages.
- The file is used only when a value is provided for O(name), and O(state) is set to V(present) or V(latest).
+ - Use a custom world file when checking for explicitly installed packages. The file is used only when a value is provided
+ for O(name), and O(state) is set to V(present) or V(latest).
type: str
default: /etc/apk/world
version_added: 5.4.0
notes:
- - 'O(name) and O(upgrade) are mutually exclusive.'
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
-'''
+ - O(name) and O(upgrade) are mutually exclusive.
+ - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
+ to the O(name) option.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Update repositories and install foo package
community.general.apk:
name: foo
@@ -157,15 +157,15 @@ EXAMPLES = '''
name: foo
state: latest
world: /etc/apk/world.custom
-'''
+"""
-RETURN = '''
+RETURN = r"""
packages:
- description: a list of packages that have been changed
- returned: when packages have changed
- type: list
- sample: ['package', 'other-package']
-'''
+ description: A list of packages that have been changed.
+ returned: when packages have changed
+ type: list
+ sample: ["package", "other-package"]
+"""
import re
# Import module snippets.
@@ -351,6 +351,9 @@ def main():
p = module.params
+ if p['name'] and any(not name.strip() for name in p['name']):
+ module.fail_json(msg="Package name(s) cannot be empty or whitespace-only")
+
if p['no_cache']:
APK_PATH = "%s --no-cache" % (APK_PATH, )
diff --git a/plugins/modules/apt_repo.py b/plugins/modules/apt_repo.py
index 4c82587d03..87df0064ca 100644
--- a/plugins/modules/apt_repo.py
+++ b/plugins/modules/apt_repo.py
@@ -9,16 +9,15 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: apt_repo
-short_description: Manage APT repositories via apt-repo
+short_description: Manage APT repositories using C(apt-repo)
description:
- - Manages APT repositories using apt-repo tool.
- - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo
+ - Manages APT repositories using C(apt-repo) tool.
+ - See U(https://www.altlinux.org/Apt-repo) for details about C(apt-repo).
notes:
- - This module works on ALT based distros.
- - Does NOT support checkmode, due to a limitation in apt-repo tool.
+ - This module works on ALT based distros.
+ - Does NOT support checkmode, due to a limitation in C(apt-repo) tool.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -35,13 +34,13 @@ options:
state:
description:
- Indicates the desired repository state.
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
type: str
remove_others:
description:
- - Remove other then added repositories
- - Used if O(state=present)
+ - Remove repositories other than the added ones.
+ - Used if O(state=present).
type: bool
default: false
update:
@@ -50,10 +49,10 @@ options:
type: bool
default: false
author:
-- Mikhail Gordeev (@obirvalger)
-'''
+ - Mikhail Gordeev (@obirvalger)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Remove all repositories
community.general.apt_repo:
repo: all
@@ -70,9 +69,9 @@ EXAMPLES = '''
repo: copy:///space/ALT/Sisyphus
state: present
update: true
-'''
+"""
-RETURN = ''' # '''
+RETURN = """ # """
import os
diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py
index 3a0b6d805f..1dcca5815c 100644
--- a/plugins/modules/apt_rpm.py
+++ b/plugins/modules/apt_rpm.py
@@ -11,8 +11,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: apt_rpm
short_description: APT-RPM package manager
description:
@@ -28,19 +27,17 @@ options:
package:
description:
- List of packages to install, upgrade, or remove.
- - Since community.general 8.0.0, may include paths to local C(.rpm) files
- if O(state=installed) or O(state=present), requires C(rpm) python
- module.
- aliases: [ name, pkg ]
+ - Since community.general 8.0.0, may include paths to local C(.rpm) files if O(state=installed) or O(state=present),
+ requires C(rpm) Python module.
+ aliases: [name, pkg]
type: list
elements: str
state:
description:
- Indicates the desired package state.
- - Please note that V(present) and V(installed) are equivalent to V(latest) right now.
- This will change in the future. To simply ensure that a package is installed, without upgrading
- it, use the V(present_not_latest) state.
- The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0.
+ - Please note before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed
+ in community.general 11.0.0. Now they are equivalent to V(present_not_latest).
choices:
- absent
- present
@@ -52,14 +49,15 @@ options:
type: str
update_cache:
description:
- - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as
+ a separate step.
- Default is not to update the cache.
type: bool
default: false
clean:
description:
- - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
- the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
+ - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything
+ but the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
- Can be run as part of the package installation (clean runs before install) or as a separate step.
type: bool
default: false
@@ -77,13 +75,12 @@ options:
default: false
version_added: 6.5.0
requirements:
- - C(rpm) python package (rpm bindings), optional. Required if O(package)
- option includes local files.
+ - C(rpm) Python package (rpm bindings), optional. Required if O(package) option includes local files.
author:
-- Evgenii Terechkov (@evgkrsk)
-'''
+ - Evgenii Terechkov (@evgkrsk)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install package foo
community.general.apt_rpm:
pkg: foo
@@ -122,7 +119,7 @@ EXAMPLES = '''
update_cache: true
dist_upgrade: true
update_kernel: true
-'''
+"""
import os
import re
@@ -310,17 +307,6 @@ def main():
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
- if p['state'] in ['installed', 'present']:
- module.deprecate(
- 'state=%s currently behaves unexpectedly by always upgrading to the latest version if'
- ' the package is already installed. This behavior is deprecated and will change in'
- ' community.general 11.0.0. You can use state=latest to explicitly request this behavior'
- ' or state=present_not_latest to explicitly request the behavior that state=%s will have'
- ' in community.general 11.0.0, namely that the package will not be upgraded if it is'
- ' already installed.' % (p['state'], p['state']),
- version='11.0.0',
- collection_name='community.general',
- )
modified = False
output = ""
@@ -344,7 +330,7 @@ def main():
packages = p['package']
if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']:
- (m, out) = install_packages(module, packages, allow_upgrade=p['state'] != 'present_not_latest')
+ (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest')
modified = modified or m
output += out
diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py
index 6784aa1ac3..65b397c255 100644
--- a/plugins/modules/archive.py
+++ b/plugins/modules/archive.py
@@ -10,17 +10,16 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: archive
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment:
- - files
- - community.general.attributes
+ - files
+ - community.general.attributes
description:
- - Creates or extends an archive.
- - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
- - Source files can be deleted after archival by specifying O(remove=True).
+ - Creates or extends an archive.
+ - The source and archive are on the target host, and the archive I(is not) copied to the controller host.
+ - Source files can be deleted after archival by specifying O(remove=True).
attributes:
check_mode:
support: full
@@ -37,17 +36,19 @@ options:
description:
- The type of compression to use.
type: str
- choices: [ bz2, gz, tar, xz, zip ]
+ choices: [bz2, gz, tar, xz, zip]
default: gz
dest:
description:
- The file name of the destination archive. The parent directory must exists on the remote host.
- - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
- - If the destination archive already exists, it will be truncated and overwritten.
+ - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths
+ in a list.
+ - If the destination archive already exists, it is truncated and overwritten.
type: path
exclude_path:
description:
- - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob expansion.
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob
+ expansion.
- Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list.
type: list
elements: path
@@ -72,18 +73,19 @@ options:
type: bool
default: false
notes:
- - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives.
- - This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
- These are part of the Python standard library for Python 2 and 3.
+ - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives.
+ - This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives. These are
+ part of the Python standard library for Python 2 and 3.
requirements:
- - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format.
+ - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python
+ 2) if using C(xz) format.
seealso:
- - module: ansible.builtin.unarchive
+ - module: ansible.builtin.unarchive
author:
- - Ben Doherty (@bendoh)
-'''
+ - Ben Doherty (@bendoh)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
community.general.archive:
path: /path/to/foo
@@ -102,28 +104,28 @@ EXAMPLES = r'''
- name: Create a bz2 archive of multiple files, rooted at /path
community.general.archive:
path:
- - /path/to/foo
- - /path/wong/foo
+ - /path/to/foo
+ - /path/wong/foo
dest: /path/file.tar.bz2
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
community.general.archive:
path:
- - /path/to/foo/*
+ - /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- - /path/to/foo/bar
- - /path/to/foo/baz
+ - /path/to/foo/bar
+ - /path/to/foo/baz
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
community.general.archive:
path:
- - /path/to/foo/*
+ - /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- - /path/to/foo/ba*
+ - /path/to/foo/ba*
format: bz2
- name: Use gzip to compress a single archive (i.e don't archive it first with tar)
@@ -138,45 +140,44 @@ EXAMPLES = r'''
dest: /path/file.tar.gz
format: gz
force_archive: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
state:
- description:
- The state of the input O(path).
- type: str
- returned: always
+ description: The state of the input O(path).
+ type: str
+ returned: always
dest_state:
- description:
- - The state of the O(dest) file.
- - V(absent) when the file does not exist.
- - V(archive) when the file is an archive.
- - V(compress) when the file is compressed, but not an archive.
- - V(incomplete) when the file is an archive, but some files under O(path) were not found.
- type: str
- returned: success
- version_added: 3.4.0
+ description:
+ - The state of the O(dest) file.
+ - V(absent) when the file does not exist.
+ - V(archive) when the file is an archive.
+ - V(compress) when the file is compressed, but not an archive.
+ - V(incomplete) when the file is an archive, but some files under O(path) were not found.
+ type: str
+ returned: success
+ version_added: 3.4.0
missing:
- description: Any files that were missing from the source.
- type: list
- returned: success
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
archived:
- description: Any files that were compressed or added to the archive.
- type: list
- returned: success
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
arcroot:
- description: The archive root.
- type: str
- returned: always
+ description: The archive root.
+ type: str
+ returned: always
expanded_paths:
- description: The list of matching paths from paths argument.
- type: list
- returned: always
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
expanded_exclude_paths:
- description: The list of matching exclude paths from the exclude_path argument.
- type: list
- returned: always
-'''
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+"""
import abc
import bz2
diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py
index d1567c8923..aba3827ea0 100644
--- a/plugins/modules/atomic_container.py
+++ b/plugins/modules/atomic_container.py
@@ -9,69 +9,71 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: atomic_container
short_description: Manage the containers on the atomic host platform
description:
- - Manage the containers on the atomic host platform.
- - Allows to manage the lifecycle of a container on the atomic host platform.
+ - Manage the containers on the atomic host platform.
+ - Allows to manage the lifecycle of a container on the atomic host platform.
+deprecated:
+ removed_in: 13.0.0
+ why: Project Atomic was sunset by the end of 2019.
+ alternative: There is none.
author: "Giuseppe Scrivano (@giuseppe)"
-notes:
- - Host should support C(atomic) command
requirements:
- - atomic
+ - atomic
+notes:
+ - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
- backend:
- description:
- - Define the backend to use for the container.
- required: true
- choices: ["docker", "ostree"]
- type: str
- name:
- description:
- - Name of the container.
- required: true
- type: str
- image:
- description:
- - The image to use to install the container.
- required: true
- type: str
- rootfs:
- description:
- - Define the rootfs of the image.
- type: str
- state:
- description:
- - State of the container.
- choices: ["absent", "latest", "present", "rollback"]
- default: "latest"
- type: str
- mode:
- description:
- - Define if it is an user or a system container.
- choices: ["user", "system"]
- type: str
- values:
- description:
- - Values for the installation of the container.
- - This option is permitted only with mode 'user' or 'system'.
- - The values specified here will be used at installation time as --set arguments for atomic install.
- type: list
- elements: str
- default: []
-'''
-
-EXAMPLES = r'''
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: true
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: true
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: true
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+ - Define if it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+ - This option is permitted only with mode 'user' or 'system'.
+ - The values specified here will be used at installation time as --set arguments for atomic install.
+ type: list
+ elements: str
+ default: []
+"""
+EXAMPLES = r"""
- name: Install the etcd system container
community.general.atomic_container:
name: etcd
@@ -80,7 +82,7 @@ EXAMPLES = r'''
state: latest
mode: system
values:
- - ETCD_NAME=etcd.server
+ - ETCD_NAME=etcd.server
- name: Uninstall the etcd system container
community.general.atomic_container:
@@ -89,15 +91,15 @@ EXAMPLES = r'''
backend: ostree
state: absent
mode: system
-'''
+"""
-RETURN = r'''
+RETURN = r"""
msg:
- description: The command standard output
- returned: always
- type: str
- sample: 'Using default tag: latest ...'
-'''
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Using default tag: latest ...'
+"""
# import module snippets
import traceback
diff --git a/plugins/modules/atomic_host.py b/plugins/modules/atomic_host.py
index ebb74caf16..fb9bfb2e6a 100644
--- a/plugins/modules/atomic_host.py
+++ b/plugins/modules/atomic_host.py
@@ -8,37 +8,41 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: atomic_host
short_description: Manage the atomic host platform
description:
- - Manage the atomic host platform.
- - Rebooting of Atomic host platform should be done outside this module.
+ - Manage the atomic host platform.
+ - Rebooting of Atomic host platform should be done outside this module.
+deprecated:
+ removed_in: 13.0.0
+ why: Project Atomic was sunset by the end of 2019.
+ alternative: There is none.
author:
-- Saravanan KR (@krsacme)
+ - Saravanan KR (@krsacme)
notes:
- - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+ - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+ - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- revision:
- description:
- - The version number of the atomic host to be deployed.
- - Providing V(latest) will upgrade to the latest available version.
- default: 'latest'
- aliases: [ version ]
- type: str
-'''
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing V(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [version]
+ type: str
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
community.general.atomic_host:
revision: latest
@@ -46,15 +50,15 @@ EXAMPLES = r'''
- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
community.general.atomic_host:
revision: 23.130
-'''
+"""
-RETURN = r'''
+RETURN = r"""
msg:
- description: The command standard output
- returned: always
- type: str
- sample: 'Already on latest'
-'''
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Already on latest'
+"""
import os
import traceback
diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py
index 4bd15e27ab..28011676af 100644
--- a/plugins/modules/atomic_image.py
+++ b/plugins/modules/atomic_image.py
@@ -8,52 +8,56 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: atomic_image
short_description: Manage the container images on the atomic host platform
description:
- - Manage the container images on the atomic host platform.
- - Allows to execute the commands specified by the RUN label in the container image when present.
+ - Manage the container images on the atomic host platform.
+ - Allows to execute the commands specified by the RUN label in the container image when present.
+deprecated:
+ removed_in: 13.0.0
+ why: Project Atomic was sunset by the end of 2019.
+ alternative: There is none.
author:
-- Saravanan KR (@krsacme)
+ - Saravanan KR (@krsacme)
notes:
- - Host should support C(atomic) command.
+ - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- backend:
- description:
- - Define the backend where the image is pulled.
- choices: [ 'docker', 'ostree' ]
- type: str
- name:
- description:
- - Name of the container image.
- required: true
- type: str
- state:
- description:
- - The state of the container image.
- - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
- choices: [ 'absent', 'latest', 'present' ]
- default: 'latest'
- type: str
- started:
- description:
- - Start or Stop the container.
- type: bool
- default: true
-'''
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: ['docker', 'ostree']
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of the container image.
+ - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container,
+ if running.
+ choices: ['absent', 'latest', 'present']
+ default: 'latest'
+ type: str
+ started:
+ description:
+ - Start or stop the container.
+ type: bool
+ default: true
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
community.general.atomic_image:
name: rhel7/rsyslog
@@ -64,15 +68,15 @@ EXAMPLES = r'''
name: busybox
state: latest
backend: ostree
-'''
+"""
-RETURN = r'''
+RETURN = r"""
msg:
- description: The command standard output
- returned: always
- type: str
- sample: 'Using default tag: latest ...'
-'''
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Using default tag: latest ...'
+"""
import traceback
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py
index f3c2384b5b..0bc4ca1d79 100644
--- a/plugins/modules/awall.py
+++ b/plugins/modules/awall.py
@@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: awall
short_description: Manage awall policies
author: Ted Trask (@tdtrask)
description:
- This modules allows for enable/disable/activate of C(awall) policies.
- - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files
- and activates the configuration on the system.
+ - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files and activates the configuration
+ on the system.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -35,39 +34,39 @@ options:
description:
- Whether the policies should be enabled or disabled.
type: str
- choices: [ disabled, enabled ]
+ choices: [disabled, enabled]
default: enabled
activate:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
- - Idempotency is affected if O(activate=true), as the module will always report a changed state.
+ - Idempotency is affected if O(activate=true), as the module always reports a changed state.
type: bool
default: false
notes:
- - At least one of O(name) and O(activate) is required.
-'''
+ - At least one of O(name) and O(activate) is required.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Enable "foo" and "bar" policy
community.general.awall:
- name: [ foo bar ]
+ name: [foo, bar]
state: enabled
- name: Disable "foo" and "bar" policy and activate new rules
community.general.awall:
name:
- - foo
- - bar
+ - foo
+ - bar
state: disabled
activate: false
- name: Activate currently enabled firewall rules
community.general.awall:
activate: true
-'''
+"""
-RETURN = ''' # '''
+RETURN = """ # """
import re
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py
index 8857fd8464..0c200661f1 100644
--- a/plugins/modules/beadm.py
+++ b/plugins/modules/beadm.py
@@ -9,62 +9,59 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: beadm
short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
description:
- - Create, delete or activate ZFS boot environments.
- - Mount and unmount ZFS boot environments.
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
author: Adam Števko (@xen0l)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - ZFS boot environment name.
- type: str
- required: true
- aliases: [ "be" ]
- snapshot:
- description:
- - If specified, the new boot environment will be cloned from the given
- snapshot or inactive boot environment.
- type: str
+ name:
description:
- description:
- - Associate a description with a new boot environment. This option is
- available only on Solarish platforms.
- type: str
- options:
- description:
- - Create the datasets for new BE with specific ZFS properties.
- - Multiple options can be specified.
- - This option is available only on Solarish platforms.
- type: str
- mountpoint:
- description:
- - Path where to mount the ZFS boot environment.
- type: path
- state:
- description:
- - Create or delete ZFS boot environment.
- type: str
- choices: [ absent, activated, mounted, present, unmounted ]
- default: present
- force:
- description:
- - Specifies if the unmount should be forced.
- type: bool
- default: false
-'''
+ - ZFS boot environment name.
+ type: str
+ required: true
+ aliases: ["be"]
+ snapshot:
+ description:
+ - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [absent, activated, mounted, present, unmounted]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool
+ default: false
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create ZFS boot environment
community.general.beadm:
name: upgrade-be
@@ -103,45 +100,45 @@ EXAMPLES = r'''
community.general.beadm:
name: upgrade-be
state: activated
-'''
+"""
-RETURN = r'''
+RETURN = r"""
name:
- description: BE name
- returned: always
- type: str
- sample: pre-upgrade
+ description: BE name.
+ returned: always
+ type: str
+ sample: pre-upgrade
snapshot:
- description: ZFS snapshot to create BE from
- returned: always
- type: str
- sample: rpool/ROOT/oi-hipster@fresh
+ description: ZFS snapshot to create BE from.
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
description:
- description: BE description
- returned: always
- type: str
- sample: Upgrade from 9.0 to 10.0
+ description: BE description.
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
options:
- description: BE additional options
- returned: always
- type: str
- sample: compression=on
+ description: BE additional options.
+ returned: always
+ type: str
+ sample: compression=on
mountpoint:
- description: BE mountpoint
- returned: always
- type: str
- sample: /mnt/be
+ description: BE mountpoint.
+ returned: always
+ type: str
+ sample: /mnt/be
state:
- description: state of the target
- returned: always
- type: str
- sample: present
+ description: State of the target.
+ returned: always
+ type: str
+ sample: present
force:
- description: If forced action is wanted
- returned: always
- type: bool
- sample: false
-'''
+ description: If forced action is wanted.
+ returned: always
+ type: bool
+ sample: false
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/bearychat.py b/plugins/modules/bearychat.py
index f52737facd..e738d83d36 100644
--- a/plugins/modules/bearychat.py
+++ b/plugins/modules/bearychat.py
@@ -7,12 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: bearychat
short_description: Send BearyChat notifications
description:
- - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
- via the Incoming Robot integration.
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) using the Incoming Robot integration.
author: "Jiangge Zhang (@tonyseek)"
extends_documentation_fragment:
- community.general.attributes
@@ -25,8 +24,7 @@ options:
url:
type: str
description:
- - BearyChat WebHook URL. This authenticates you to the bearychat
- service. It looks like
+ - BearyChat WebHook URL. This authenticates you to the bearychat service. It looks like
V(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
required: true
text:
@@ -35,23 +33,22 @@ options:
- Message to send.
markdown:
description:
- - If V(true), text will be parsed as markdown.
+ - If V(true), text is parsed as markdown.
default: true
type: bool
channel:
type: str
description:
- - Channel to send the message to. If absent, the message goes to the
- default channel selected by the O(url).
+ - Channel to send the message to. If absent, the message goes to the default channel selected by the O(url).
attachments:
type: list
elements: dict
description:
- Define a list of attachments. For more information, see
- https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments
-'''
+ U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Send notification message via BearyChat
local_action:
module: bearychat
@@ -75,12 +72,12 @@ EXAMPLES = """
- http://example.com/index.png
"""
-RETURN = """
+RETURN = r"""
msg:
- description: execution result
- returned: success
- type: str
- sample: "OK"
+ description: Execution result.
+ returned: success
+ type: str
+ sample: "OK"
"""
try:
diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py
index 7bde5fc1d8..81e2085b7d 100644
--- a/plugins/modules/bigpanda.py
+++ b/plugins/modules/bigpanda.py
@@ -8,13 +8,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
description:
- - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters
+ for future module calls.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,7 +26,7 @@ options:
component:
type: str
description:
- - "The name of the component being deployed. Ex: billing"
+ - 'The name of the component being deployed. Ex: V(billing).'
required: true
aliases: ['name']
version:
@@ -55,7 +55,7 @@ options:
env:
type: str
description:
- - The environment name, typically 'production', 'staging', etc.
+ - The environment name, typically V(production), V(staging), and so on.
required: false
owner:
type: str
@@ -75,27 +75,27 @@ options:
default: "https://api.bigpanda.io"
validate_certs:
description:
- - If V(false), SSL certificates for the target url will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
+ sites using self-signed certificates.
required: false
default: true
type: bool
deployment_message:
type: str
description:
- - Message about the deployment.
+ - Message about the deployment.
version_added: '0.2.0'
source_system:
type: str
description:
- - Source system used in the requests to the API
+ - Source system used in the requests to the API.
default: ansible
# informational: requirements for nodes
-requirements: [ ]
-'''
+requirements: []
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Notify BigPanda about a deployment
community.general.bigpanda:
component: myapp
@@ -128,7 +128,7 @@ EXAMPLES = '''
token: '{{ deployment.token }}'
state: finished
delegate_to: localhost
-'''
+"""
# ===========================================
# Module execution.
@@ -150,14 +150,14 @@ def main():
version=dict(required=True),
token=dict(required=True, no_log=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
- hosts=dict(required=False, aliases=['host']),
- env=dict(required=False),
- owner=dict(required=False),
- description=dict(required=False),
- deployment_message=dict(required=False),
- source_system=dict(required=False, default='ansible'),
+ hosts=dict(aliases=['host']),
+ env=dict(),
+ owner=dict(),
+ description=dict(),
+ deployment_message=dict(),
+ source_system=dict(default='ansible'),
validate_certs=dict(default=True, type='bool'),
- url=dict(required=False, default='https://api.bigpanda.io'),
+ url=dict(default='https://api.bigpanda.io'),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py
index 29c19b8b3d..f78f55d3bb 100644
--- a/plugins/modules/bitbucket_access_key.py
+++ b/plugins/modules/bitbucket_access_key.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: bitbucket_access_key
short_description: Manages Bitbucket repository access keys
description:
@@ -33,7 +32,7 @@ options:
workspace:
description:
- The repository owner.
- - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
+ - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user).
type: str
required: true
key:
@@ -50,13 +49,13 @@ options:
- Indicates desired state of the access key.
type: str
required: true
- choices: [ absent, present ]
+ choices: [absent, present]
notes:
- Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories.
- Check mode is supported.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create access key
community.general.bitbucket_access_key:
repository: 'bitbucket-repo'
@@ -71,9 +70,9 @@ EXAMPLES = r'''
workspace: bitbucket_workspace
label: Bitbucket
state: absent
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
diff --git a/plugins/modules/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py
index 3bc41c2987..e16af96867 100644
--- a/plugins/modules/bitbucket_pipeline_key_pair.py
+++ b/plugins/modules/bitbucket_pipeline_key_pair.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: bitbucket_pipeline_key_pair
short_description: Manages Bitbucket pipeline SSH key pair
description:
@@ -33,7 +32,7 @@ options:
workspace:
description:
- The repository owner.
- - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
+ - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user).
type: str
required: true
public_key:
@@ -49,12 +48,12 @@ options:
- Indicates desired state of the key pair.
type: str
required: true
- choices: [ absent, present ]
+ choices: [absent, present]
notes:
- Check mode is supported.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create or update SSH key pair
community.general.bitbucket_pipeline_key_pair:
repository: 'bitbucket-repo'
@@ -68,9 +67,9 @@ EXAMPLES = r'''
repository: bitbucket-repo
workspace: bitbucket_workspace
state: absent
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py
index 3e6c4bfbf1..eb8b22b4f0 100644
--- a/plugins/modules/bitbucket_pipeline_known_host.py
+++ b/plugins/modules/bitbucket_pipeline_known_host.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: bitbucket_pipeline_known_host
short_description: Manages Bitbucket pipeline known hosts
description:
- Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
- - The host fingerprint will be retrieved automatically, but in case of an error, one can use O(key) field to specify it manually.
+ - The host fingerprint is retrieved automatically, but in case of an error, one can use O(key) field to specify it manually.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
@@ -36,7 +35,7 @@ options:
workspace:
description:
- The repository owner.
- - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
+ - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user).
type: str
required: true
name:
@@ -53,12 +52,12 @@ options:
- Indicates desired state of the record.
type: str
required: true
- choices: [ absent, present ]
+ choices: [absent, present]
notes:
- Check mode is supported.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create known hosts from the list
community.general.bitbucket_pipeline_known_host:
repository: 'bitbucket-repo'
@@ -83,9 +82,9 @@ EXAMPLES = r'''
name: bitbucket.org
key: '{{lookup("file", "bitbucket.pub") }}'
state: absent
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
import socket
diff --git a/plugins/modules/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py
index 1ff8e43753..08a1d3f1e8 100644
--- a/plugins/modules/bitbucket_pipeline_variable.py
+++ b/plugins/modules/bitbucket_pipeline_variable.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: bitbucket_pipeline_variable
short_description: Manages Bitbucket pipeline variables
description:
@@ -33,7 +32,7 @@ options:
workspace:
description:
- The repository owner.
- - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
+ - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user).
type: str
required: true
name:
@@ -55,13 +54,13 @@ options:
- Indicates desired state of the variable.
type: str
required: true
- choices: [ absent, present ]
+ choices: [absent, present]
notes:
- Check mode is supported.
- For secured values return parameter C(changed) is always V(true).
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create or update pipeline variables from the list
community.general.bitbucket_pipeline_variable:
repository: 'bitbucket-repo'
@@ -71,8 +70,8 @@ EXAMPLES = r'''
secured: '{{ item.secured }}'
state: present
with_items:
- - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false }
- - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true }
+ - {name: AWS_ACCESS_KEY, value: ABCD1234, secured: false}
+ - {name: AWS_SECRET, value: qwe789poi123vbn0, secured: true}
- name: Remove pipeline variable
community.general.bitbucket_pipeline_variable:
@@ -80,9 +79,9 @@ EXAMPLES = r'''
workspace: bitbucket_workspace
name: AWS_ACCESS_KEY
state: absent
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule, _load_params
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py
index 5628ffcca0..da92c02b06 100644
--- a/plugins/modules/bootc_manage.py
+++ b/plugins/modules/bootc_manage.py
@@ -8,34 +8,32 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: bootc_manage
version_added: 9.3.0
author:
-- Ryan Cook (@cooktheryan)
+ - Ryan Cook (@cooktheryan)
short_description: Bootc Switch and Upgrade
description:
- - This module manages the switching and upgrading of C(bootc).
+ - This module manages the switching and upgrading of C(bootc).
options:
- state:
- description:
- - 'Control to apply the latest image or switch the image.'
- - 'B(Note:) This will not reboot the system.'
- - 'Please use M(ansible.builtin.reboot) to reboot the system.'
- required: true
- type: str
- choices: ['switch', 'latest']
- image:
- description:
- - 'The image to switch to.'
- - 'This is required when O(state=switch).'
- required: false
- type: str
+ state:
+ description:
+ - Control whether to apply the latest image or switch the image.
+ - B(Note:) This does not reboot the system.
+ - Please use M(ansible.builtin.reboot) to reboot the system.
+ required: true
+ type: str
+ choices: ['switch', 'latest']
+ image:
+ description:
+ - The image to switch to.
+ - This is required when O(state=switch).
+ required: false
+ type: str
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Switch to a different image
- name: Provide image to switch to a different image and retain the current running image
community.general.bootc_manage:
@@ -46,10 +44,10 @@ EXAMPLES = '''
- name: Apply updates of the current running image
community.general.bootc_manage:
state: latest
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -59,7 +57,7 @@ from ansible.module_utils.common.locale import get_best_parsable_locale
def main():
argument_spec = dict(
state=dict(type='str', required=True, choices=['switch', 'latest']),
- image=dict(type='str', required=False),
+ image=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py
index 1824e68bb8..547152fa98 100644
--- a/plugins/modules/bower.py
+++ b/plugins/modules/bower.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: bower
-short_description: Manage bower packages with bower
+short_description: Manage bower packages with C(bower)
description:
- - Manage bower packages with bower
+ - Manage bower packages with C(bower).
author: "Michael Warkentin (@mwarkentin)"
extends_documentation_fragment:
- community.general.attributes
@@ -27,39 +26,39 @@ options:
name:
type: str
description:
- - The name of a bower package to install
+ - The name of a bower package to install.
offline:
description:
- - Install packages from local cache, if the packages were installed before
+ - Install packages from local cache, if the packages were installed before.
type: bool
default: false
production:
description:
- - Install with --production flag
+ - Install with C(--production) flag.
type: bool
default: false
path:
type: path
description:
- - The base path where to install the bower packages
+ - The base path where to install the bower packages.
required: true
relative_execpath:
type: path
description:
- - Relative path to bower executable from install path
+ - Relative path to bower executable from install path.
state:
type: str
description:
- - The state of the bower package
+ - The state of the bower package.
default: present
- choices: [ "present", "absent", "latest" ]
+ choices: ["present", "absent", "latest"]
version:
type: str
description:
- - The version to be installed
-'''
+ - The version to be installed.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install "bootstrap" bower package.
community.general.bower:
name: bootstrap
@@ -91,7 +90,8 @@ EXAMPLES = '''
- community.general.bower:
path: /app/location
relative_execpath: node_modules/.bin
-'''
+"""
+
import json
import os
@@ -187,13 +187,13 @@ class Bower(object):
def main():
arg_spec = dict(
- name=dict(default=None),
+ name=dict(),
offline=dict(default=False, type='bool'),
production=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
- relative_execpath=dict(default=None, required=False, type='path'),
+ relative_execpath=dict(type='path'),
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
- version=dict(default=None),
+ version=dict(),
)
module = AnsibleModule(
argument_spec=arg_spec
diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py
index c367b9ed10..9467fb782d 100644
--- a/plugins/modules/btrfs_info.py
+++ b/plugins/modules/btrfs_info.py
@@ -7,78 +7,73 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: btrfs_info
short_description: Query btrfs filesystem info
version_added: "6.6.0"
-description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.
+description: Query status of available btrfs filesystems, including UUID, label, subvolumes and mountpoints.
author:
- - Gregory Furlong (@gnfzdz)
+ - Gregory Furlong (@gnfzdz)
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
-'''
-
-EXAMPLES = r'''
+ - community.general.attributes
+ - community.general.attributes.info_module
+"""
+EXAMPLES = r"""
- name: Query information about mounted btrfs filesystems
community.general.btrfs_info:
register: my_btrfs_info
+"""
-'''
-
-RETURN = r'''
-
+RETURN = r"""
filesystems:
- description: Summaries of the current state for all btrfs filesystems found on the target host.
- type: list
- elements: dict
- returned: success
- contains:
- uuid:
- description: A unique identifier assigned to the filesystem.
- type: str
- sample: 96c9c605-1454-49b8-a63a-15e2584c208e
- label:
- description: An optional label assigned to the filesystem.
- type: str
- sample: Tank
- devices:
- description: A list of devices assigned to the filesystem.
- type: list
- sample:
- - /dev/sda1
- - /dev/sdb1
- default_subvolume:
- description: The id of the filesystem's default subvolume.
- type: int
- sample: 5
- subvolumes:
- description: A list of dicts containing metadata for all of the filesystem's subvolumes.
- type: list
- elements: dict
- contains:
- id:
- description: An identifier assigned to the subvolume, unique within the containing filesystem.
- type: int
- sample: 256
- mountpoints:
- description: Paths where the subvolume is mounted on the targeted host.
- type: list
- sample: ['/home']
- parent:
- description: The identifier of this subvolume's parent.
- type: int
- sample: 5
- path:
- description: The full path of the subvolume relative to the btrfs fileystem's root.
- type: str
- sample: /@home
-
-'''
+ description: Summaries of the current state for all btrfs filesystems found on the target host.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ uuid:
+ description: A unique identifier assigned to the filesystem.
+ type: str
+ sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+ label:
+ description: An optional label assigned to the filesystem.
+ type: str
+ sample: Tank
+ devices:
+ description: A list of devices assigned to the filesystem.
+ type: list
+ sample:
+ - /dev/sda1
+ - /dev/sdb1
+ default_subvolume:
+ description: The ID of the filesystem's default subvolume.
+ type: int
+ sample: 5
+ subvolumes:
+ description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: An identifier assigned to the subvolume, unique within the containing filesystem.
+ type: int
+ sample: 256
+ mountpoints:
+ description: Paths where the subvolume is mounted on the targeted host.
+ type: list
+ sample: ["/home"]
+ parent:
+ description: The identifier of this subvolume's parent.
+ type: int
+ sample: 5
+ path:
+ description: The full path of the subvolume relative to the btrfs fileystem's root.
+ type: str
+ sample: /@home
+"""
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py
index 0aa38bf0e4..3c34ef4680 100644
--- a/plugins/modules/btrfs_subvolume.py
+++ b/plugins/modules/btrfs_subvolume.py
@@ -7,8 +7,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: btrfs_subvolume
short_description: Manage btrfs subvolumes
version_added: "6.6.0"
@@ -16,71 +15,73 @@ version_added: "6.6.0"
description: Creates, updates and deletes btrfs subvolumes and snapshots.
options:
- automount:
- description:
- - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
- type: bool
- default: false
- default:
- description:
- - Make the subvolume specified by O(name) the filesystem's default subvolume.
- type: bool
- default: false
- filesystem_device:
- description:
- - A block device contained within the btrfs filesystem to be targeted.
- - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
- type: path
- filesystem_label:
- description:
- - A descriptive label assigned to the btrfs filesystem to be targeted.
- - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
- type: str
- filesystem_uuid:
- description:
- - A unique identifier assigned to the btrfs filesystem to be targeted.
- - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
- type: str
- name:
- description:
- - Name of the subvolume/snapshot to be targeted.
- required: true
- type: str
- recursive:
- description:
- - When true, indicates that parent/child subvolumes should be created/removedas necessary
- to complete the operation (for O(state=present) and O(state=absent) respectively).
- type: bool
- default: false
- snapshot_source:
- description:
- - Identifies the source subvolume for the created snapshot.
- - Infers that the created subvolume is a snapshot.
- type: str
- snapshot_conflict:
- description:
- - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
- - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
- Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
- - V(clobber) - If a subvolume already exists at the requested location, delete it first.
- This option is not idempotent and will result in a new snapshot being generated on every execution.
- - V(error) - If a subvolume already exists at the requested location, return an error.
- This option is not idempotent and will result in an error on replay of the module.
- type: str
- choices: [ skip, clobber, error ]
- default: skip
- state:
- description:
- - Indicates the current state of the targeted subvolume.
- type: str
- choices: [ absent, present ]
- default: present
+ automount:
+ description:
+ - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make
+ any required changes.
+ type: bool
+ default: false
+ default:
+ description:
+ - Make the subvolume specified by O(name) the filesystem's default subvolume.
+ type: bool
+ default: false
+ filesystem_device:
+ description:
+ - A block device contained within the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: path
+ filesystem_label:
+ description:
+ - A descriptive label assigned to the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: str
+ filesystem_uuid:
+ description:
+ - A unique identifier assigned to the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: str
+ name:
+ description:
+ - Name of the subvolume/snapshot to be targeted.
+ required: true
+ type: str
+ recursive:
+ description:
+ - When true, indicates that parent/child subvolumes should be created/removedas necessary to complete the operation
+ (for O(state=present) and O(state=absent) respectively).
+ type: bool
+ default: false
+ snapshot_source:
+ description:
+ - Identifies the source subvolume for the created snapshot.
+ - Infers that the created subvolume is a snapshot.
+ type: str
+ snapshot_conflict:
+ description:
+ - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
+ - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that
+ no change is required. Warning, this option does not yet verify that the target subvolume was generated from a snapshot
+ of the requested source.
+ - V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent
+ and results in a new snapshot being generated on every execution.
+ - V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent
+ and results in an error on replay of the module.
+ type: str
+ choices: [skip, clobber, error]
+ default: skip
+ state:
+ description:
+ - Indicates the current state of the targeted subvolume.
+ type: str
+ choices: [absent, present]
+ default: present
notes:
- - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided, there is expected
- to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
- btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
-
+ - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided,
+ there is expected to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or
+ only a single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an
+ error.
extends_documentation_fragment:
- community.general.attributes
@@ -88,17 +89,16 @@ attributes:
check_mode:
support: partial
details:
- - In some scenarios it may erroneously report intermediate subvolumes being created.
- After mounting, if a directory like file is found where the subvolume would have been created, the operation is skipped.
+ - In some scenarios it may erroneously report intermediate subvolumes being created. After mounting, if a directory
+ like file is found where the subvolume would have been created, the operation is skipped.
diff_mode:
support: none
author:
- - Gregory Furlong (@gnfzdz)
-'''
-
-EXAMPLES = r'''
+ - Gregory Furlong (@gnfzdz)
+"""
+EXAMPLES = r"""
- name: Create a @home subvolume under the root subvolume
community.general.btrfs_subvolume:
name: /@home
@@ -120,92 +120,90 @@ EXAMPLES = r'''
community.general.btrfs_subvolume:
name: /@
snapshot_source: /
- default: Yes
+ default: true
filesystem_device: /dev/vda2
- name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required
community.general.btrfs_subvolume:
name: /@snapshots/@2022_06_09
snapshot_source: /@
- recursive: True
+ recursive: true
filesystem_device: /dev/vda2
- name: Remove the /@ subvolume and recursively delete child subvolumes as required
community.general.btrfs_subvolume:
name: /@snapshots/@2022_06_09
snapshot_source: /@
- recursive: True
+ recursive: true
filesystem_device: /dev/vda2
+"""
-'''
-
-RETURN = r'''
-
+RETURN = r"""
filesystem:
- description:
+ description:
- A summary of the final state of the targeted btrfs filesystem.
- type: dict
- returned: success
- contains:
- uuid:
- description: A unique identifier assigned to the filesystem.
- returned: success
- type: str
- sample: 96c9c605-1454-49b8-a63a-15e2584c208e
- label:
- description: An optional label assigned to the filesystem.
- returned: success
- type: str
- sample: Tank
- devices:
- description: A list of devices assigned to the filesystem.
- returned: success
- type: list
- sample:
- - /dev/sda1
- - /dev/sdb1
- default_subvolume:
- description: The ID of the filesystem's default subvolume.
- returned: success and if filesystem is mounted
- type: int
- sample: 5
- subvolumes:
- description: A list of dicts containing metadata for all of the filesystem's subvolumes.
- returned: success and if filesystem is mounted
- type: list
- elements: dict
- contains:
- id:
- description: An identifier assigned to the subvolume, unique within the containing filesystem.
- type: int
- sample: 256
- mountpoints:
- description: Paths where the subvolume is mounted on the targeted host.
- type: list
- sample: ['/home']
- parent:
- description: The identifier of this subvolume's parent.
- type: int
- sample: 5
- path:
- description: The full path of the subvolume relative to the btrfs fileystem's root.
- type: str
- sample: /@home
+ type: dict
+ returned: success
+ contains:
+ uuid:
+ description: A unique identifier assigned to the filesystem.
+ returned: success
+ type: str
+ sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+ label:
+ description: An optional label assigned to the filesystem.
+ returned: success
+ type: str
+ sample: Tank
+ devices:
+ description: A list of devices assigned to the filesystem.
+ returned: success
+ type: list
+ sample:
+ - /dev/sda1
+ - /dev/sdb1
+ default_subvolume:
+ description: The ID of the filesystem's default subvolume.
+ returned: success and if filesystem is mounted
+ type: int
+ sample: 5
+ subvolumes:
+ description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+ returned: success and if filesystem is mounted
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: An identifier assigned to the subvolume, unique within the containing filesystem.
+ type: int
+ sample: 256
+ mountpoints:
+ description: Paths where the subvolume is mounted on the targeted host.
+ type: list
+ sample: ["/home"]
+ parent:
+ description: The identifier of this subvolume's parent.
+ type: int
+ sample: 5
+ path:
+ description: The full path of the subvolume relative to the btrfs fileystem's root.
+ type: str
+ sample: /@home
modifications:
- description:
+ description:
- A list where each element describes a change made to the target btrfs filesystem.
- type: list
- returned: Success
- elements: str
+ type: list
+ returned: Success
+ elements: str
target_subvolume_id:
- description:
+ description:
- The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution.
- type: int
- sample: 257
- returned: Success and subvolume exists after module execution
-'''
+ type: int
+ sample: 257
+ returned: Success and subvolume exists after module execution
+"""
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
@@ -646,16 +644,16 @@ class BtrfsSubvolumeModule(object):
def run_module():
module_args = dict(
- automount=dict(type='bool', required=False, default=False),
- default=dict(type='bool', required=False, default=False),
- filesystem_device=dict(type='path', required=False),
- filesystem_label=dict(type='str', required=False),
- filesystem_uuid=dict(type='str', required=False),
+ automount=dict(type='bool', default=False),
+ default=dict(type='bool', default=False),
+ filesystem_device=dict(type='path'),
+ filesystem_label=dict(type='str'),
+ filesystem_uuid=dict(type='str'),
name=dict(type='str', required=True),
recursive=dict(type='bool', default=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- snapshot_source=dict(type='str', required=False),
- snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ snapshot_source=dict(type='str'),
+ snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error'])
)
module = AnsibleModule(
diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py
index 59f10800c1..6bf2556110 100644
--- a/plugins/modules/bundler.py
+++ b/plugins/modules/bundler.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
- - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,80 +25,72 @@ options:
executable:
type: str
description:
- - The path to the bundler executable
+ - The path to the bundler executable.
state:
type: str
description:
- - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version
+ - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version.
choices: [present, latest]
default: present
chdir:
type: path
description:
- - The directory to execute the bundler commands from. This directory
- needs to contain a valid Gemfile or .bundle/ directory
- - If not specified, it will default to the temporary working directory
+ - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory.
+ - If not specified, it defaults to the temporary working directory.
exclude_groups:
type: list
elements: str
description:
- - A list of Gemfile groups to exclude during operations. This only
- applies when O(state=present). Bundler considers this
- a 'remembered' property for the Gemfile and will automatically exclude
- groups in future operations even if O(exclude_groups) is not set
+ - A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers
+ this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups)
+ is not set.
clean:
description:
- - Only applies if O(state=present). If set removes any gems on the
- target host that are not in the gemfile
+ - Only applies if O(state=present). If set removes any gems on the target host that are not in the gemfile.
type: bool
default: false
gemfile:
type: path
description:
- Only applies if O(state=present). The path to the gemfile to use to install gems.
- - If not specified it will default to the Gemfile in current directory
+ - If not specified it defaults to the Gemfile in current directory.
local:
description:
- - If set only installs gems from the cache on the target host
+ - If set only installs gems from the cache on the target host.
type: bool
default: false
deployment_mode:
description:
- - Only applies if O(state=present). If set it will install gems in
- ./vendor/bundle instead of the default location. Requires a Gemfile.lock
- file to have been created prior
+ - Only applies if O(state=present). If set it installs gems in C(./vendor/bundle) instead of the default location. Requires
+ a C(Gemfile.lock) file to have been created prior.
type: bool
default: false
user_install:
description:
- - Only applies if O(state=present). Installs gems in the local user's cache or for all users
+ - Only applies if O(state=present). Installs gems in the local user's cache or for all users.
type: bool
default: true
gem_path:
type: path
description:
- - Only applies if O(state=present). Specifies the directory to
- install the gems into. If O(chdir) is set then this path is relative to
- O(chdir)
- - If not specified the default RubyGems gem paths will be used.
+ - Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this path
+ is relative to O(chdir).
+ - If not specified the default RubyGems gem paths are used.
binstub_directory:
type: path
description:
- - Only applies if O(state=present). Specifies the directory to
- install any gem bins files to. When executed the bin files will run
- within the context of the Gemfile and fail if any required gem
- dependencies are not installed. If O(chdir) is set then this path is
- relative to O(chdir)
+ - Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin
+ files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir)
+ is set then this path is relative to O(chdir).
extra_args:
type: str
description:
- - A space separated string of additional commands that can be applied to
- the Bundler command. Refer to the Bundler documentation for more
- information
+ - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation
+ for more information.
author: "Tim Hoiberg (@thoiberg)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install gems from a Gemfile in the current directory
community.general.bundler:
state: present
@@ -124,7 +115,7 @@ EXAMPLES = '''
community.general.bundler:
state: latest
chdir: ~/rails_project
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -140,18 +131,18 @@ def get_bundler_executable(module):
def main():
module = AnsibleModule(
argument_spec=dict(
- executable=dict(default=None, required=False),
- state=dict(default='present', required=False, choices=['present', 'latest']),
- chdir=dict(default=None, required=False, type='path'),
- exclude_groups=dict(default=None, required=False, type='list', elements='str'),
- clean=dict(default=False, required=False, type='bool'),
- gemfile=dict(default=None, required=False, type='path'),
- local=dict(default=False, required=False, type='bool'),
- deployment_mode=dict(default=False, required=False, type='bool'),
- user_install=dict(default=True, required=False, type='bool'),
- gem_path=dict(default=None, required=False, type='path'),
- binstub_directory=dict(default=None, required=False, type='path'),
- extra_args=dict(default=None, required=False),
+ executable=dict(),
+ state=dict(default='present', choices=['present', 'latest']),
+ chdir=dict(type='path'),
+ exclude_groups=dict(type='list', elements='str'),
+ clean=dict(default=False, type='bool'),
+ gemfile=dict(type='path'),
+ local=dict(default=False, type='bool'),
+ deployment_mode=dict(default=False, type='bool'),
+ user_install=dict(default=True, type='bool'),
+ gem_path=dict(type='path'),
+ binstub_directory=dict(type='path'),
+ extra_args=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py
index 5a60d765c7..76ae917802 100644
--- a/plugins/modules/bzr.py
+++ b/plugins/modules/bzr.py
@@ -9,59 +9,55 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: bzr
author:
-- André Paramés (@andreparames)
+ - André Paramés (@andreparames)
short_description: Deploy software (or files) from bzr branches
description:
- - Manage C(bzr) branches to deploy files or software.
+ - Manage C(bzr) branches to deploy files or software.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
- name:
- description:
- - SSH or HTTP protocol address of the parent branch.
- aliases: [ parent ]
- required: true
- type: str
- dest:
- description:
- - Absolute path of where the branch should be cloned to.
- required: true
- type: path
- version:
- description:
- - What version of the branch to clone. This can be the
- bzr revno or revid.
- default: head
- type: str
- force:
- description:
- - If V(true), any modified files in the working
- tree will be discarded.
- type: bool
- default: false
- executable:
- description:
- - Path to bzr executable to use. If not supplied,
- the normal mechanism for resolving binary paths will be used.
- type: str
-'''
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [parent]
+ required: true
+ type: str
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: true
+ type: path
+ version:
+ description:
+ - What version of the branch to clone. This can be the bzr revno or revid.
+ default: head
+ type: str
+ force:
+ description:
+ - If V(true), any modified files in the working tree is discarded.
+ type: bool
+ default: false
+ executable:
+ description:
+ - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Checkout
community.general.bzr:
name: bzr+ssh://foosball.example.org/path/to/branch
dest: /srv/checkout
version: 22
-'''
+"""
import os
import re
diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py
index 1e0f1ecea4..128790c372 100644
--- a/plugins/modules/campfire.py
+++ b/plugins/modules/campfire.py
@@ -9,15 +9,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: campfire
short_description: Send a message to Campfire
description:
- - Send a message to Campfire.
- - Messages with newlines will result in a "Paste" message being sent.
+ - Send a message to Campfire.
+ - Messages with newlines result in a "Paste" message being sent.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -49,22 +48,58 @@ options:
description:
- Send a notification sound before the message.
required: false
- choices: ["56k", "bell", "bezos", "bueller", "clowntown",
- "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
- "danielsan", "deeper", "drama", "greatjob", "greyjoy",
- "guarantee", "heygirl", "horn", "horror",
- "inconceivable", "live", "loggins", "makeitso", "noooo",
- "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
- "rollout", "rumble", "sax", "secret", "sexyback",
- "story", "tada", "tmyk", "trololo", "trombone", "unix",
- "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+ choices:
+ - 56k
+ - bell
+ - bezos
+ - bueller
+ - clowntown
+ - cottoneyejoe
+ - crickets
+ - dadgummit
+ - dangerzone
+ - danielsan
+ - deeper
+ - drama
+ - greatjob
+ - greyjoy
+ - guarantee
+ - heygirl
+ - horn
+ - horror
+ - inconceivable
+ - live
+ - loggins
+ - makeitso
+ - noooo
+ - nyan
+ - ohmy
+ - ohyeah
+ - pushit
+ - rimshot
+ - rollout
+ - rumble
+ - sax
+ - secret
+ - sexyback
+ - story
+ - tada
+ - tmyk
+ - trololo
+ - trombone
+ - unix
+ - vuvuzela
+ - what
+ - whoomp
+ - yeah
+ - yodel
# informational: requirements for nodes
-requirements: [ ]
+requirements: []
author: "Adam Garside (@fabulops)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send a message to Campfire
community.general.campfire:
subscription: foo
@@ -79,7 +114,7 @@ EXAMPLES = '''
room: 123
notify: loggins
msg: Task completed ... with feeling.
-'''
+"""
try:
from html import escape as html_escape
@@ -102,8 +137,7 @@ def main():
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
- notify=dict(required=False,
- choices=["56k", "bell", "bezos", "bueller",
+ notify=dict(choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py
index a0b6d52223..08bd2e85ff 100644
--- a/plugins/modules/capabilities.py
+++ b/plugins/modules/capabilities.py
@@ -8,48 +8,47 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: capabilities
short_description: Manage Linux capabilities
description:
- - This module manipulates files privileges using the Linux capabilities(7) system.
+ - This module manipulates files privileges using the Linux capabilities(7) system.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- path:
- description:
- - Specifies the path to the file to be managed.
- type: str
- required: true
- aliases: [ key ]
- capability:
- description:
- - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent))
- type: str
- required: true
- aliases: [ cap ]
- state:
- description:
- - Whether the entry should be present or absent in the file's capabilities.
- type: str
- choices: [ absent, present ]
- default: present
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: true
+ aliases: [key]
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent)).
+ type: str
+ required: true
+ aliases: [cap]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [absent, present]
+ default: present
notes:
- - The capabilities system will automatically transform operators and flags into the effective set,
- so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
- - This module does not attempt to determine the final operator and flags to compare,
- so you will want to ensure that your capabilities argument matches the final capabilities.
+ - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep)
+ probably becomes C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities
+ argument matches the final capabilities.
author:
-- Nate Coraor (@natefoo)
-'''
+ - Nate Coraor (@natefoo)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Set cap_sys_chroot+ep on /foo
community.general.capabilities:
path: /foo
@@ -61,7 +60,7 @@ EXAMPLES = r'''
path: /bar
capability: cap_net_bind_service
state: absent
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -93,7 +92,7 @@ class CapabilitiesModule(object):
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
- # remove from current cap list if it's already set (but op/flags differ)
+ # remove from current cap list if it is already set (but op/flags differ)
current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
# add new cap with correct op/flags
current.append(self.capability_tup)
@@ -124,6 +123,8 @@ class CapabilitiesModule(object):
if ' =' in stdout:
# process output of an older version of libcap
caps = stdout.split(' =')[1].strip().split()
+ elif stdout.strip().endswith(")"): # '/foo (Error Message)'
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
else:
# otherwise, we have a newer version here
# see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
index 2fc729da20..c00983fade 100644
--- a/plugins/modules/cargo.py
+++ b/plugins/modules/cargo.py
@@ -11,7 +11,6 @@ __metaclass__ = type
DOCUMENTATION = r"""
----
module: cargo
short_description: Manage Rust packages with cargo
version_added: 4.3.0
@@ -29,7 +28,7 @@ options:
executable:
description:
- Path to the C(cargo) installed in the system.
- - If not specified, the module will look C(cargo) in E(PATH).
+ - If not specified, the module looks for C(cargo) in E(PATH).
type: path
version_added: 7.5.0
name:
@@ -39,16 +38,12 @@ options:
elements: str
required: true
path:
- description:
- ->
- The base path where to install the Rust packages. Cargo automatically appends
- V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
+ description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local)
+ becomes V(/usr/local/bin).
type: path
version:
- description:
- ->
- The version to install. If O(name) contains multiple values, the module will
- try to install all of them in this version.
+ description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this
+ version.
type: str
required: false
locked:
@@ -65,7 +60,7 @@ options:
required: false
type: str
default: present
- choices: [ "present", "absent", "latest" ]
+ choices: ["present", "absent", "latest"]
directory:
description:
- Path to the source directory to install the Rust package from.
@@ -73,8 +68,17 @@ options:
type: path
required: false
version_added: 9.1.0
+ features:
+ description:
+ - List of features to activate.
+ - This is only used when installing packages.
+ type: list
+ elements: str
+ required: false
+ default: []
+ version_added: 11.0.0
requirements:
- - cargo installed
+ - cargo installed
"""
EXAMPLES = r"""
@@ -111,6 +115,12 @@ EXAMPLES = r"""
community.general.cargo:
name: ludusavi
directory: /path/to/ludusavi/source
+
+- name: Install "serpl" Rust package with ast_grep feature
+ community.general.cargo:
+ name: serpl
+ features:
+ - ast_grep
"""
import json
@@ -130,6 +140,7 @@ class Cargo(object):
self.version = kwargs["version"]
self.locked = kwargs["locked"]
self.directory = kwargs["directory"]
+ self.features = kwargs["features"]
@property
def path(self):
@@ -181,6 +192,8 @@ class Cargo(object):
if self.directory:
cmd.append("--path")
cmd.append(self.directory)
+ if self.features:
+ cmd += ["--features", ",".join(self.features)]
return self._exec(cmd)
def is_outdated(self, name):
@@ -234,13 +247,14 @@ class Cargo(object):
def main():
arg_spec = dict(
- executable=dict(default=None, type="path"),
+ executable=dict(type="path"),
name=dict(required=True, type="list", elements="str"),
- path=dict(default=None, type="path"),
+ path=dict(type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
- version=dict(default=None, type="str"),
+ version=dict(type="str"),
locked=dict(default=False, type="bool"),
- directory=dict(default=None, type="path"),
+ directory=dict(type="path"),
+ features=dict(default=[], type="list", elements="str"),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py
index acd8398512..448de5d13d 100644
--- a/plugins/modules/catapult.py
+++ b/plugins/modules/catapult.py
@@ -11,14 +11,19 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: catapult
-short_description: Send a sms / mms using the catapult bandwidth api
+short_description: Send an SMS / MMS using the Catapult Bandwidth API
description:
- - Allows notifications to be sent using sms / mms via the catapult bandwidth api.
+ - Allows notifications to be sent as SMS / MMS using the Catapult Bandwidth API.
+deprecated:
+ removed_in: 13.0.0
+ why: >-
+ DNS fails to resolve the API endpoint used by the module since Oct 2024.
+ See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details.
+ alternative: There is none.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -44,31 +49,30 @@ options:
media:
type: str
description:
- - For MMS messages, a media url to the location of the media to be sent with the message.
+ - For MMS messages, a media URL to the location of the media to be sent with the message.
user_id:
type: str
description:
- - User Id from Api account page.
+ - User ID from API account page.
required: true
api_token:
type: str
description:
- - Api Token from Api account page.
+ - API Token from API account page.
required: true
api_secret:
type: str
description:
- - Api Secret from Api account page.
+ - API Secret from API account page.
required: true
author: "Jonathan Mainguy (@Jmainguy)"
notes:
- - Will return changed even if the media url is wrong.
- - Will return changed if the destination number is invalid.
+ - Will return changed even if the media URL is wrong.
+ - Will return changed if the destination number is invalid.
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send a mms to multiple users
community.general.catapult:
src: "+15035555555"
@@ -89,16 +93,7 @@ EXAMPLES = '''
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
-
-'''
-
-RETURN = '''
-changed:
- description: Whether the api accepted the message.
- returned: always
- type: bool
- sample: true
-'''
+"""
import json
@@ -135,7 +130,7 @@ def main():
user_id=dict(required=True),
api_token=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
- media=dict(default=None, required=False),
+ media=dict(),
),
)
diff --git a/plugins/modules/circonus_annotation.py b/plugins/modules/circonus_annotation.py
index f3b94a0524..9e563171cd 100644
--- a/plugins/modules/circonus_annotation.py
+++ b/plugins/modules/circonus_annotation.py
@@ -9,62 +9,59 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: circonus_annotation
-short_description: Create an annotation in circonus
+short_description: Create an annotation in Circonus
description:
- - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
+ - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided.
author: "Nick Harring (@NickatEpic)"
requirements:
- - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
-notes:
- - Check mode isn't supported.
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- api_key:
- type: str
- description:
- - Circonus API key
- required: true
- category:
- type: str
- description:
- - Annotation Category
- required: true
+ api_key:
+ type: str
description:
- type: str
- description:
- - Description of annotation
- required: true
- title:
- type: str
- description:
- - Title of annotation
- required: true
- start:
- type: int
- description:
- - Unix timestamp of event start
- - If not specified, it defaults to "now".
- stop:
- type: int
- description:
- - Unix timestamp of event end
- - If not specified, it defaults to "now" + O(duration).
- duration:
- type: int
- description:
- - Duration in seconds of annotation
- default: 0
-'''
-EXAMPLES = '''
+ - Circonus API key.
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category.
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation.
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation.
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start.
+ - If not specified, it defaults to "now".
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end.
+ - If not specified, it defaults to "now" + O(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation.
+ default: 0
+"""
+EXAMPLES = r"""
- name: Create a simple annotation event with a source, defaults to start and end time of now
community.general.circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
@@ -88,66 +85,67 @@ EXAMPLES = '''
category: This category groups like annotations
start_time: 1395940006
end_time: 1395954407
-'''
+"""
-RETURN = '''
+RETURN = r"""
annotation:
- description: details about the created annotation
- returned: success
- type: complex
- contains:
- _cid:
- description: annotation identifier
- returned: success
- type: str
- sample: /annotation/100000
- _created:
- description: creation timestamp
- returned: success
- type: int
- sample: 1502236928
- _last_modified:
- description: last modification timestamp
- returned: success
- type: int
- sample: 1502236928
- _last_modified_by:
- description: last modified by
- returned: success
- type: str
- sample: /user/1000
- category:
- description: category of the created annotation
- returned: success
- type: str
- sample: alerts
- title:
- description: title of the created annotation
- returned: success
- type: str
- sample: WARNING
- description:
- description: description of the created annotation
- returned: success
- type: str
- sample: Host is down.
- start:
- description: timestamp, since annotation applies
- returned: success
- type: int
- sample: Host is down.
- stop:
- description: timestamp, since annotation ends
- returned: success
- type: str
- sample: Host is down.
- rel_metrics:
- description: Array of metrics related to this annotation, each metrics is a string.
- returned: success
- type: list
- sample:
- - 54321_kbps
-'''
+ description: Details about the created annotation.
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: Annotation identifier.
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: Creation timestamp.
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: Last modification timestamp.
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: Last modified by.
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: Category of the created annotation.
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: Title of the created annotation.
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: Description of the created annotation.
+ returned: success
+ type: str
+ sample: Host is down.
+ start:
+ description: Timestamp, since annotation applies.
+ returned: success
+ type: int
+ sample: 1395940006
+ stop:
+ description: Timestamp, since annotation ends.
+ returned: success
+ type: str
+ sample: "1395954407"
+ rel_metrics:
+ description: Array of metrics related to this annotation, each metric is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+"""
+
import json
import time
import traceback
diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py
index caa77f576d..f957f4121d 100644
--- a/plugins/modules/cisco_webex.py
+++ b/plugins/modules/cisco_webex.py
@@ -9,17 +9,15 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: cisco_webex
short_description: Send a message to a Cisco Webex Teams Room or Individual
description:
- - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
author: Drew Rusell (@drew-russell)
notes:
- The O(recipient_type) must be valid for the supplied O(recipient_id).
- Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
-
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -32,8 +30,8 @@ options:
recipient_type:
description:
- - The request parameter you would like to send the message to.
- - Messages can be sent to either a room or individual (by ID or E-Mail).
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
required: true
choices: ['roomId', 'toPersonEmail', 'toPersonId']
type: str
@@ -46,7 +44,7 @@ options:
msg_type:
description:
- - Specifies how you would like the message formatted.
+ - Specifies how you would like the message formatted.
default: text
choices: ['text', 'markdown']
type: str
@@ -64,9 +62,9 @@ options:
- The message you would like to send.
required: true
type: str
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# Note: The following examples assume a variable file has been imported
# that contains the appropriate information.
@@ -101,10 +99,9 @@ EXAMPLES = """
msg_type: text
personal_token: "{{ token }}"
msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
-
"""
-RETURN = """
+RETURN = r"""
status_code:
description:
- The Response Code returned by the Webex Teams API.
@@ -114,12 +111,12 @@ status_code:
sample: 200
message:
- description:
- - The Response Message returned by the Webex Teams API.
- - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
- returned: always
- type: str
- sample: OK (585 bytes)
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -180,7 +177,7 @@ def main():
argument_spec=dict(
recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
recipient_id=dict(required=True, no_log=True),
- msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']),
personal_token=dict(required=True, no_log=True, aliases=['token']),
msg=dict(required=True),
),
diff --git a/plugins/modules/clc_aa_policy.py b/plugins/modules/clc_aa_policy.py
deleted file mode 100644
index 05135bd957..0000000000
--- a/plugins/modules/clc_aa_policy.py
+++ /dev/null
@@ -1,353 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_aa_policy
-short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
-description:
- - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the Anti Affinity Policy.
- type: str
- required: true
- location:
- description:
- - Datacenter in which the policy lives/should live.
- type: str
- required: true
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- required: false
- default: present
- choices: ['present','absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
----
-- name: Create AA Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-
-- name: Delete AA Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-'''
-
-RETURN = '''
-policy:
- description: The anti affinity policy information
- returned: success
- type: dict
- sample:
- {
- "id":"1a28dd0988984d87b9cd61fa8da15424",
- "name":"test_aa_policy",
- "location":"UC1",
- "links":[
- {
- "rel":"self",
- "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
- "verbs":[
- "GET",
- "DELETE",
- "PUT"
- ]
- },
- {
- "rel":"location",
- "href":"/v2/datacenters/wfad/UC1",
- "id":"uc1",
- "name":"UC1 - US West (Santa Clara)"
- }
- ]
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk:
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAntiAffinityPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'),
- exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'),
- exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- location=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_policies_for_datacenter(p)
-
- if p['state'] == "absent":
- changed, policy = self._ensure_policy_is_absent(p)
- else:
- changed, policy = self._ensure_policy_is_present(p)
-
- if hasattr(policy, 'data'):
- policy = policy.data
- elif hasattr(policy, '__dict__'):
- policy = policy.__dict__
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_policies_for_datacenter(self, p):
- """
- Get the Policies for a datacenter by calling the CLC API.
- :param p: datacenter to get policies from
- :return: policies in the datacenter
- """
- response = {}
-
- policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
-
- for policy in policies:
- response[policy.name] = policy
- return response
-
- def _create_policy(self, p):
- """
- Create an Anti Affinity Policy using the CLC API.
- :param p: datacenter to create policy in
- :return: response dictionary from the CLC API.
- """
- try:
- return self.clc.v2.AntiAffinity.Create(
- name=p['name'],
- location=p['location'])
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _delete_policy(self, p):
- """
- Delete an Anti Affinity Policy using the CLC API.
- :param p: datacenter to delete a policy from
- :return: none
- """
- try:
- policy = self.policy_dict[p['name']]
- policy.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _policy_exists(self, policy_name):
- """
- Check to see if an Anti Affinity Policy exists
- :param policy_name: name of the policy
- :return: boolean of if the policy exists
- """
- if policy_name in self.policy_dict:
- return self.policy_dict.get(policy_name)
-
- return False
-
- def _ensure_policy_is_absent(self, p):
- """
- Makes sure that a policy is absent
- :param p: dictionary of policy name
- :return: tuple of if a deletion occurred and the name of the policy that was deleted
- """
- changed = False
- if self._policy_exists(policy_name=p['name']):
- changed = True
- if not self.module.check_mode:
- self._delete_policy(p)
- return changed, None
-
- def _ensure_policy_is_present(self, p):
- """
- Ensures that a policy is present
- :param p: dictionary of a policy name
- :return: tuple of if an addition occurred and the name of the policy that was added
- """
- changed = False
- policy = self._policy_exists(policy_name=p['name'])
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_policy(p)
- return changed, policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
- supports_check_mode=True)
- clc_aa_policy = ClcAntiAffinityPolicy(module)
- clc_aa_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_alert_policy.py b/plugins/modules/clc_alert_policy.py
deleted file mode 100644
index b77c83e3b7..0000000000
--- a/plugins/modules/clc_alert_policy.py
+++ /dev/null
@@ -1,536 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_alert_policy
-short_description: Create or Delete Alert Policies at CenturyLink Cloud
-description:
- - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- alias:
- description:
- - The alias of your CLC Account
- type: str
- required: true
- name:
- description:
- - The name of the alert policy. This is mutually exclusive with id
- type: str
- id:
- description:
- - The alert policy id. This is mutually exclusive with name
- type: str
- alert_recipients:
- description:
- - A list of recipient email ids to notify the alert.
- This is required for state 'present'
- type: list
- elements: str
- metric:
- description:
- - The metric on which to measure the condition that will trigger the alert.
- This is required for state 'present'
- type: str
- choices: ['cpu','memory','disk']
- duration:
- description:
- - The length of time in minutes that the condition must exceed the threshold.
- This is required for state 'present'
- type: str
- threshold:
- description:
- - The threshold that will trigger the alert when the metric equals or exceeds it.
- This is required for state 'present'
- This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0
- type: int
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- default: present
- choices: ['present','absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
----
-- name: Create Alert Policy Example
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create an Alert Policy for disk above 80% for 5 minutes
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- alert_recipients:
- - test1@centurylink.com
- - test2@centurylink.com
- metric: 'disk'
- duration: '00:05:00'
- threshold: 80
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-
-- name: Delete Alert Policy Example
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Alert Policy
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-'''
-
-RETURN = '''
-policy:
- description: The alert policy information
- returned: success
- type: dict
- sample:
- {
- "actions": [
- {
- "action": "email",
- "settings": {
- "recipients": [
- "user1@domain.com",
- "user1@domain.com"
- ]
- }
- }
- ],
- "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
- "links": [
- {
- "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
- "rel": "self",
- "verbs": [
- "GET",
- "DELETE",
- "PUT"
- ]
- }
- ],
- "name": "test_alert",
- "triggers": [
- {
- "duration": "00:05:00",
- "metric": "disk",
- "threshold": 80.0
- }
- ]
- }
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAlertPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- id=dict(),
- alias=dict(required=True),
- alert_recipients=dict(type='list', elements='str'),
- metric=dict(
- choices=[
- 'cpu',
- 'memory',
- 'disk']),
- duration=dict(type='str'),
- threshold=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- mutually_exclusive = [
- ['name', 'id']
- ]
- return {'argument_spec': argument_spec,
- 'mutually_exclusive': mutually_exclusive}
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_alert_policies(p['alias'])
-
- if p['state'] == 'present':
- changed, policy = self._ensure_alert_policy_is_present()
- else:
- changed, policy = self._ensure_alert_policy_is_absent()
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_alert_policy_is_present(self):
- """
- Ensures that the alert policy is present
- :return: (changed, policy)
- changed: A flag representing if anything is modified
- policy: the created/updated alert policy
- """
- changed = False
- p = self.module.params
- policy_name = p.get('name')
-
- if not policy_name:
- self.module.fail_json(msg='Policy name is a required')
- policy = self._alert_policy_exists(policy_name)
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_alert_policy()
- else:
- changed_u, policy = self._ensure_alert_policy_is_updated(policy)
- if changed_u:
- changed = True
- return changed, policy
-
- def _ensure_alert_policy_is_absent(self):
- """
- Ensures that the alert policy is absent
- :return: (changed, None)
- changed: A flag representing if anything is modified
- """
- changed = False
- p = self.module.params
- alert_policy_id = p.get('id')
- alert_policy_name = p.get('name')
- alias = p.get('alias')
- if not alert_policy_id and not alert_policy_name:
- self.module.fail_json(
- msg='Either alert policy id or policy name is required')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id(
- self.module,
- alert_policy_name)
- if alert_policy_id and alert_policy_id in self.policy_dict:
- changed = True
- if not self.module.check_mode:
- self._delete_alert_policy(alias, alert_policy_id)
- return changed, None
-
- def _ensure_alert_policy_is_updated(self, alert_policy):
- """
- Ensures the alert policy is updated if anything is changed in the alert policy configuration
- :param alert_policy: the target alert policy
- :return: (changed, policy)
- changed: A flag representing if anything is modified
- policy: the updated the alert policy
- """
- changed = False
- p = self.module.params
- alert_policy_id = alert_policy.get('id')
- email_list = p.get('alert_recipients')
- metric = p.get('metric')
- duration = p.get('duration')
- threshold = p.get('threshold')
- policy = alert_policy
- if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
- (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
- (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
- changed = True
- elif email_list:
- t_email_list = list(
- alert_policy.get('actions')[0].get('settings').get('recipients'))
- if set(email_list) != set(t_email_list):
- changed = True
- if changed and not self.module.check_mode:
- policy = self._update_alert_policy(alert_policy_id)
- return changed, policy
-
- def _get_alert_policies(self, alias):
- """
- Get the alert policies for account alias by calling the CLC API.
- :param alias: the account alias
- :return: the alert policies for the account alias
- """
- response = {}
-
- policies = self.clc.v2.API.Call('GET',
- '/v2/alertPolicies/%s'
- % alias)
-
- for policy in policies.get('items'):
- response[policy.get('id')] = policy
- return response
-
- def _create_alert_policy(self):
- """
- Create an alert Policy using the CLC API.
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'POST',
- '/v2/alertPolicies/%s' % alias,
- arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to create alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _update_alert_policy(self, alert_policy_id):
- """
- Update alert policy using the CLC API.
- :param alert_policy_id: The clc alert policy id
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'PUT', '/v2/alertPolicies/%s/%s' %
- (alias, alert_policy_id), arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to update alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _delete_alert_policy(self, alias, policy_id):
- """
- Delete an alert policy using the CLC API.
- :param alias : the account alias
- :param policy_id: the alert policy id
- :return: response dictionary from the CLC API.
- """
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/alertPolicies/%s/%s' %
- (alias, policy_id), None)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to delete alert policy id "{0}". {1}'.format(
- policy_id, str(e.response_text)))
- return result
-
- def _alert_policy_exists(self, policy_name):
- """
- Check to see if an alert policy exists
- :param policy_name: name of the alert policy
- :return: boolean of if the policy exists
- """
- result = False
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == policy_name:
- result = self.policy_dict.get(policy_id)
- return result
-
- def _get_alert_policy_id(self, module, alert_policy_name):
- """
- retrieves the alert policy id of the account based on the name of the policy
- :param module: the AnsibleModule object
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy_id
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcAlertPolicy._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_alert_policy = ClcAlertPolicy(module)
- clc_alert_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_blueprint_package.py b/plugins/modules/clc_blueprint_package.py
deleted file mode 100644
index 672e06780f..0000000000
--- a/plugins/modules/clc_blueprint_package.py
+++ /dev/null
@@ -1,309 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_blueprint_package
-short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud
-description:
- - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - A list of server Ids to deploy the blue print package.
- type: list
- required: true
- elements: str
- package_id:
- description:
- - The package id of the blue print.
- type: str
- required: true
- package_params:
- description:
- - The dictionary of arguments required to deploy the blue print.
- type: dict
- default: {}
- required: false
- state:
- description:
- - Whether to install or uninstall the package. Currently it supports only "present" for install action.
- type: str
- required: false
- default: present
- choices: ['present']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: str
- default: 'True'
- required: false
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Deploy package
- community.general.clc_blueprint_package:
- server_ids:
- - UC1TEST-SERVER1
- - UC1TEST-SERVER2
- package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
- package_params: {}
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SERVER1",
- "UC1TEST-SERVER2"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcBlueprintPackage:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- changed = False
- changed_server_ids = []
- self._set_clc_credentials_from_env()
- server_ids = p['server_ids']
- package_id = p['package_id']
- package_params = p['package_params']
- state = p['state']
- if state == 'present':
- changed, changed_server_ids, request_list = self.ensure_package_installed(
- server_ids, package_id, package_params)
- self._wait_for_requests_to_complete(request_list)
- self.module.exit_json(changed=changed, server_ids=changed_server_ids)
-
- @staticmethod
- def define_argument_spec():
- """
- This function defines the dictionary object required for
- package module
- :return: the package dictionary object
- """
- argument_spec = dict(
- server_ids=dict(type='list', elements='str', required=True),
- package_id=dict(required=True),
- package_params=dict(type='dict', default={}),
- wait=dict(default=True), # @FIXME should be bool?
- state=dict(default='present', choices=['present'])
- )
- return argument_spec
-
- def ensure_package_installed(self, server_ids, package_id, package_params):
- """
- Ensure the package is installed in the given list of servers
- :param server_ids: the server list where the package needs to be installed
- :param package_id: the blueprint package id
- :param package_params: the package arguments
- :return: (changed, server_ids, request_list)
- changed: A flag indicating if a change was made
- server_ids: The list of servers modified
- request_list: The list of request objects from clc-sdk
- """
- changed = False
- request_list = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to get servers from CLC')
- for server in servers:
- if not self.module.check_mode:
- request = self.clc_install_package(
- server,
- package_id,
- package_params)
- request_list.append(request)
- changed = True
- return changed, server_ids, request_list
-
- def clc_install_package(self, server, package_id, package_params):
- """
- Install the package to a given clc server
- :param server: The server object where the package needs to be installed
- :param package_id: The blue print package id
- :param package_params: the required argument dict for the package installation
- :return: The result object from the CLC API call
- """
- result = None
- try:
- result = server.ExecutePackage(
- package_id=package_id,
- parameters=package_params)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
- package_id, server.id, ex.message
- ))
- return result
-
- def _wait_for_requests_to_complete(self, request_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param request_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in request_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process package install request')
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: the list of server ids
- :param message: the error message to raise if there is any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcBlueprintPackage.define_argument_spec(),
- supports_check_mode=True
- )
- clc_blueprint_package = ClcBlueprintPackage(module)
- clc_blueprint_package.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_firewall_policy.py b/plugins/modules/clc_firewall_policy.py
deleted file mode 100644
index b30037c6fe..0000000000
--- a/plugins/modules/clc_firewall_policy.py
+++ /dev/null
@@ -1,596 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_firewall_policy
-short_description: Create/delete/update firewall policies
-description:
- - Create or delete or update firewall policies on Centurylink Cloud
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- location:
- description:
- - Target datacenter for the firewall policy
- type: str
- required: true
- state:
- description:
- - Whether to create or delete the firewall policy
- type: str
- default: present
- choices: ['present', 'absent']
- source:
- description:
- - The list of source addresses for traffic on the originating firewall.
- This is required when state is 'present'
- type: list
- elements: str
- destination:
- description:
- - The list of destination addresses for traffic on the terminating firewall.
- This is required when state is 'present'
- type: list
- elements: str
- ports:
- description:
- - The list of ports associated with the policy.
- TCP and UDP can take in single ports or port ranges.
- - "Example: V(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
- type: list
- elements: str
- firewall_policy_id:
- description:
- - Id of the firewall policy. This is required to update or delete an existing firewall policy
- type: str
- source_account_alias:
- description:
- - CLC alias for the source account
- type: str
- required: true
- destination_account_alias:
- description:
- - CLC alias for the destination account
- type: str
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: str
- default: 'True'
- enabled:
- description:
- - Whether the firewall policy is enabled or disabled
- type: str
- choices: ['True', 'False']
- default: 'True'
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
----
-- name: Create Firewall Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create / Verify an Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: present
- source: 10.128.216.0/24
- destination: 10.128.216.0/24
- ports: Any
- destination_account_alias: WFAD
-
-- name: Delete Firewall Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: absent
- firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
-'''
-
-RETURN = '''
-firewall_policy_id:
- description: The fire wall policy id
- returned: success
- type: str
- sample: fc36f1bfd47242e488a9c44346438c05
-firewall_policy:
- description: The fire wall policy information
- returned: success
- type: dict
- sample:
- {
- "destination":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "destinationAccount":"wfad",
- "enabled":true,
- "id":"fc36f1bfd47242e488a9c44346438c05",
- "links":[
- {
- "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- }
- ],
- "ports":[
- "any"
- ],
- "source":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "status":"active"
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcFirewallPolicy:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.firewall_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- location=dict(required=True),
- source_account_alias=dict(required=True),
- destination_account_alias=dict(),
- firewall_policy_id=dict(),
- ports=dict(type='list', elements='str'),
- source=dict(type='list', elements='str'),
- destination=dict(type='list', elements='str'),
- wait=dict(default=True), # @FIXME type=bool
- state=dict(default='present', choices=['present', 'absent']),
- enabled=dict(default=True, choices=[True, False])
- )
- return argument_spec
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- firewall_policy = None
- location = self.module.params.get('location')
- source_account_alias = self.module.params.get('source_account_alias')
- destination_account_alias = self.module.params.get(
- 'destination_account_alias')
- firewall_policy_id = self.module.params.get('firewall_policy_id')
- ports = self.module.params.get('ports')
- source = self.module.params.get('source')
- destination = self.module.params.get('destination')
- wait = self.module.params.get('wait')
- state = self.module.params.get('state')
- enabled = self.module.params.get('enabled')
-
- self.firewall_dict = {
- 'location': location,
- 'source_account_alias': source_account_alias,
- 'destination_account_alias': destination_account_alias,
- 'firewall_policy_id': firewall_policy_id,
- 'ports': ports,
- 'source': source,
- 'destination': destination,
- 'wait': wait,
- 'state': state,
- 'enabled': enabled}
-
- self._set_clc_credentials_from_env()
-
- if state == 'absent':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
- source_account_alias, location, self.firewall_dict)
-
- elif state == 'present':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
- source_account_alias, location, self.firewall_dict)
-
- return self.module.exit_json(
- changed=changed,
- firewall_policy_id=firewall_policy_id,
- firewall_policy=firewall_policy)
-
- @staticmethod
- def _get_policy_id_from_response(response):
- """
- Method to parse out the policy id from creation response
- :param response: response from firewall creation API call
- :return: policy_id: firewall policy id from creation call
- """
- url = response.get('links')[0]['href']
- path = urlparse(url).path
- path_list = os.path.split(path)
- policy_id = path_list[-1]
- return policy_id
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_firewall_policy_is_present(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: (changed, firewall_policy_id, firewall_policy)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was created/updated
- firewall_policy: The firewall_policy object
- """
- firewall_policy = None
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
-
- if firewall_policy_id is None:
- if not self.module.check_mode:
- response = self._create_firewall_policy(
- source_account_alias,
- location,
- firewall_dict)
- firewall_policy_id = self._get_policy_id_from_response(
- response)
- changed = True
- else:
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if not firewall_policy:
- return self.module.fail_json(
- msg='Unable to find the firewall policy id : {0}'.format(
- firewall_policy_id))
- changed = self._compare_get_request_with_dict(
- firewall_policy,
- firewall_dict)
- if not self.module.check_mode and changed:
- self._update_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict)
- if changed and firewall_policy_id:
- firewall_policy = self._wait_for_requests_to_complete(
- source_account_alias,
- location,
- firewall_policy_id)
- return changed, firewall_policy_id, firewall_policy
-
- def _ensure_firewall_policy_is_absent(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is removed if present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: firewall policy to delete
- :return: (changed, firewall_policy_id, response)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was deleted
- response: response from CLC API call
- """
- changed = False
- response = []
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
- result = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if result:
- if not self.module.check_mode:
- response = self._delete_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id)
- changed = True
- return changed, firewall_policy_id, response
-
- def _create_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Creates the firewall policy for the given account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response from CLC API call
- """
- payload = {
- 'destinationAccount': firewall_dict.get('destination_account_alias'),
- 'source': firewall_dict.get('source'),
- 'destination': firewall_dict.get('destination'),
- 'ports': firewall_dict.get('ports')}
- try:
- response = self.clc.v2.API.Call(
- 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
- (source_account_alias, location), payload)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to create firewall policy. %s" %
- str(e.response_text))
- return response
-
- def _delete_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Deletes a given firewall policy for an account alias in a datacenter
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to delete
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to delete the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _update_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict):
- """
- Updates a firewall policy for a given datacenter and account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to update
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'PUT',
- '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias,
- location,
- firewall_policy_id),
- firewall_dict)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to update the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- @staticmethod
- def _compare_get_request_with_dict(response, firewall_dict):
- """
- Helper method to compare the json response for getting the firewall policy with the request parameters
- :param response: response from the get method
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: changed: Boolean that returns true if there are differences between
- the response parameters and the playbook parameters
- """
-
- changed = False
-
- response_dest_account_alias = response.get('destinationAccount')
- response_enabled = response.get('enabled')
- response_source = response.get('source')
- response_dest = response.get('destination')
- response_ports = response.get('ports')
- request_dest_account_alias = firewall_dict.get(
- 'destination_account_alias')
- request_enabled = firewall_dict.get('enabled')
- if request_enabled is None:
- request_enabled = True
- request_source = firewall_dict.get('source')
- request_dest = firewall_dict.get('destination')
- request_ports = firewall_dict.get('ports')
-
- if (
- response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
- response_enabled != request_enabled) or (
- response_source and response_source != request_source) or (
- response_dest and response_dest != request_dest) or (
- response_ports and response_ports != request_ports):
- changed = True
- return changed
-
- def _get_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Get back details for a particular firewall policy
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: id of the firewall policy to get
- :return: response - The response from CLC API call
- """
- response = None
- try:
- response = self.clc.v2.API.Call(
- 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- self.module.fail_json(
- msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _wait_for_requests_to_complete(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- wait_limit=50):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param source_account_alias: The source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: The firewall policy id
- :param wait_limit: The number of times to check the status for completion
- :return: the firewall_policy object
- """
- wait = self.module.params.get('wait')
- count = 0
- firewall_policy = None
- while wait:
- count += 1
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- status = firewall_policy.get('status')
- if status == 'active' or count > wait_limit:
- wait = False
- else:
- # wait for 2 seconds
- sleep(2)
- return firewall_policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_firewall = ClcFirewallPolicy(module)
- clc_firewall.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_group.py b/plugins/modules/clc_group.py
deleted file mode 100644
index 88aef2d63d..0000000000
--- a/plugins/modules/clc_group.py
+++ /dev/null
@@ -1,522 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_group
-short_description: Create/delete Server Groups at Centurylink Cloud
-description:
- - Create or delete Server Groups at Centurylink Centurylink Cloud
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the Server Group
- type: str
- required: true
- description:
- description:
- - A description of the Server Group
- type: str
- required: false
- parent:
- description:
- - The parent group of the server group. If parent is not provided, it creates the group at top level.
- type: str
- required: false
- location:
- description:
- - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
- associated with the account
- type: str
- required: false
- state:
- description:
- - Whether to create or delete the group
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: true
- required: false
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-
-# Create a Server Group
-
----
-- name: Create Server Group
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create / Verify a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-# Delete a Server Group
-- name: Delete Server Group
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-'''
-
-RETURN = '''
-group:
- description: The group information
- returned: success
- type: dict
- sample:
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":"2015-07-29T18:52:47Z",
- "modifiedBy":"service.wfad",
- "modifiedDate":"2015-07-29T18:52:47Z"
- },
- "customFields":[
-
- ],
- "description":"test group",
- "groups":[
-
- ],
- "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "links":[
- {
- "href":"/v2/groups/wfad",
- "rel":"createGroup",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad",
- "rel":"createServer",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"parentGroup"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
- "rel":"defaults",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
- "rel":"billing"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
- "rel":"archiveGroupAction"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
- "rel":"horizontalAutoscalePolicyMapping",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test group",
- "status":"active",
- "type":"default"
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcGroup(object):
-
- clc = None
- root_group = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- location = self.module.params.get('location')
- group_name = self.module.params.get('name')
- parent_name = self.module.params.get('parent')
- group_description = self.module.params.get('description')
- state = self.module.params.get('state')
-
- self._set_clc_credentials_from_env()
- self.group_dict = self._get_group_tree_for_datacenter(
- datacenter=location)
-
- if state == "absent":
- changed, group, requests = self._ensure_group_is_absent(
- group_name=group_name, parent_name=parent_name)
- if requests:
- self._wait_for_requests_to_complete(requests)
- else:
- changed, group = self._ensure_group_is_present(
- group_name=group_name, parent_name=parent_name, group_description=group_description)
- try:
- group = group.data
- except AttributeError:
- group = group_name
- self.module.exit_json(changed=changed, group=group)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- parent=dict(),
- location=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=True))
-
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_group_is_absent(self, group_name, parent_name):
- """
- Ensure that group_name is absent by deleting it if necessary
- :param group_name: string - the name of the clc server group to delete
- :param parent_name: string - the name of the parent group for group_name
- :return: changed, group
- """
- changed = False
- group = []
- results = []
-
- if self._group_exists(group_name=group_name, parent_name=parent_name):
- if not self.module.check_mode:
- group.append(group_name)
- result = self._delete_group(group_name)
- results.append(result)
- changed = True
- return changed, group, results
-
- def _delete_group(self, group_name):
- """
- Delete the provided server group
- :param group_name: string - the server group to delete
- :return: none
- """
- response = None
- group, parent = self.group_dict.get(group_name)
- try:
- response = group.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
- group_name, ex.response_text
- ))
- return response
-
- def _ensure_group_is_present(
- self,
- group_name,
- parent_name,
- group_description):
- """
- Checks to see if a server group exists, creates it if it doesn't.
- :param group_name: the name of the group to validate/create
- :param parent_name: the name of the parent group for group_name
- :param group_description: a short description of the server group (used when creating)
- :return: (changed, group) -
- changed: Boolean- whether a change was made,
- group: A clc group object for the group
- """
- if not self.root_group:
- raise AssertionError("Implementation Error: Root Group not set")
- parent = parent_name if parent_name is not None else self.root_group.name
- description = group_description
- changed = False
- group = group_name
-
- parent_exists = self._group_exists(group_name=parent, parent_name=None)
- child_exists = self._group_exists(
- group_name=group_name,
- parent_name=parent)
-
- if parent_exists and child_exists:
- group, parent = self.group_dict[group_name]
- changed = False
- elif parent_exists and not child_exists:
- if not self.module.check_mode:
- group = self._create_group(
- group=group,
- parent=parent,
- description=description)
- changed = True
- else:
- self.module.fail_json(
- msg="parent group: " +
- parent +
- " does not exist")
-
- return changed, group
-
- def _create_group(self, group, parent, description):
- """
- Create the provided server group
- :param group: clc_sdk.Group - the group to create
- :param parent: clc_sdk.Parent - the parent group for {group}
- :param description: string - a text description of the group
- :return: clc_sdk.Group - the created group
- """
- response = None
- (parent, grandparent) = self.group_dict[parent]
- try:
- response = parent.Create(name=group, description=description)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
- group, ex.response_text))
- return response
-
- def _group_exists(self, group_name, parent_name):
- """
- Check to see if a group exists
- :param group_name: string - the group to check
- :param parent_name: string - the parent of group_name
- :return: boolean - whether the group exists
- """
- result = False
- if group_name in self.group_dict:
- (group, parent) = self.group_dict[group_name]
- if parent_name is None or parent_name == parent.name:
- result = True
- return result
-
- def _get_group_tree_for_datacenter(self, datacenter=None):
- """
- Walk the tree of groups for a datacenter
- :param datacenter: string - the datacenter to walk (ex: 'UC1')
- :return: a dictionary of groups and parents
- """
- self.root_group = self.clc.v2.Datacenter(
- location=datacenter).RootGroup()
- return self._walk_groups_recursive(
- parent_group=None,
- child_group=self.root_group)
-
- def _walk_groups_recursive(self, parent_group, child_group):
- """
- Walk a parent-child tree of groups, starting with the provided child group
- :param parent_group: clc_sdk.Group - the parent group to start the walk
- :param child_group: clc_sdk.Group - the child group to start the walk
- :return: a dictionary of groups and parents
- """
- result = {str(child_group): (child_group, parent_group)}
- groups = child_group.Subgroups().groups
- if len(groups) > 0:
- for group in groups:
- if group.type != 'default':
- continue
-
- result.update(self._walk_groups_recursive(child_group, group))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process group request')
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcGroup._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_group = ClcGroup(module)
- clc_group.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_loadbalancer.py b/plugins/modules/clc_loadbalancer.py
deleted file mode 100644
index 675cc1100e..0000000000
--- a/plugins/modules/clc_loadbalancer.py
+++ /dev/null
@@ -1,945 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-#
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_loadbalancer
-short_description: Create, Delete shared loadbalancers in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the loadbalancer
- type: str
- required: true
- description:
- description:
- - A description for the loadbalancer
- type: str
- alias:
- description:
- - The alias of your CLC Account
- type: str
- required: true
- location:
- description:
- - The location of the datacenter where the load balancer resides in
- type: str
- required: true
- method:
- description:
- -The balancing method for the load balancer pool
- type: str
- choices: ['leastConnection', 'roundRobin']
- persistence:
- description:
- - The persistence method for the load balancer
- type: str
- choices: ['standard', 'sticky']
- port:
- description:
- - Port to configure on the public-facing side of the load balancer pool
- type: str
- choices: ['80', '443']
- nodes:
- description:
- - A list of nodes that needs to be added to the load balancer pool
- type: list
- default: []
- elements: dict
- status:
- description:
- - The status of the loadbalancer
- type: str
- default: enabled
- choices: ['enabled', 'disabled']
- state:
- description:
- - Whether to create or delete the load balancer pool
- type: str
- default: present
- choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-- name: Create Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: present
-
-- name: Add node to an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_present
-
-- name: Remove node from an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_absent
-
-- name: Delete LoadbalancerPool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: port_absent
-
-- name: Delete Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: absent
-'''
-
-RETURN = '''
-loadbalancer:
- description: The load balancer result object from CLC
- returned: success
- type: dict
- sample:
- {
- "description":"test-lb",
- "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
- "ipAddress":"66.150.174.197",
- "links":[
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
- "rel":"pools",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "name":"test-lb",
- "pools":[
-
- ],
- "status":"enabled"
- }
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcLoadBalancer:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.lb_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- result_lb = None
- loadbalancer_name = self.module.params.get('name')
- loadbalancer_alias = self.module.params.get('alias')
- loadbalancer_location = self.module.params.get('location')
- loadbalancer_description = self.module.params.get('description')
- loadbalancer_port = self.module.params.get('port')
- loadbalancer_method = self.module.params.get('method')
- loadbalancer_persistence = self.module.params.get('persistence')
- loadbalancer_nodes = self.module.params.get('nodes')
- loadbalancer_status = self.module.params.get('status')
- state = self.module.params.get('state')
-
- if loadbalancer_description is None:
- loadbalancer_description = loadbalancer_name
-
- self._set_clc_credentials_from_env()
-
- self.lb_dict = self._get_loadbalancer_list(
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- if state == 'present':
- changed, result_lb, lb_id = self.ensure_loadbalancer_present(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- description=loadbalancer_description,
- status=loadbalancer_status)
- if loadbalancer_port:
- changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
- lb_id=lb_id,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- method=loadbalancer_method,
- persistence=loadbalancer_persistence,
- port=loadbalancer_port)
-
- if loadbalancer_nodes:
- changed, result_nodes = self.ensure_lbpool_nodes_set(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
- elif state == 'absent':
- changed, result_lb = self.ensure_loadbalancer_absent(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- elif state == 'port_absent':
- changed, result_lb = self.ensure_loadbalancerpool_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port)
-
- elif state == 'nodes_present':
- changed, result_lb = self.ensure_lbpool_nodes_present(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- elif state == 'nodes_absent':
- changed, result_lb = self.ensure_lbpool_nodes_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- self.module.exit_json(changed=changed, loadbalancer=result_lb)
-
- def ensure_loadbalancer_present(
- self, name, alias, location, description, status):
- """
- Checks to see if a load balancer exists and creates one if it does not.
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description of loadbalancer
- :param status: Enabled / Disabled
- :return: (changed, result, lb_id)
- changed: Boolean whether a change was made
- result: The result object from the CLC load balancer request
- lb_id: The load balancer id
- """
- changed = False
- result = name
- lb_id = self._loadbalancer_exists(name=name)
- if not lb_id:
- if not self.module.check_mode:
- result = self.create_loadbalancer(name=name,
- alias=alias,
- location=location,
- description=description,
- status=status)
- lb_id = result.get('id')
- changed = True
-
- return changed, result, lb_id
-
- def ensure_loadbalancerpool_present(
- self, lb_id, alias, location, method, persistence, port):
- """
- Checks to see if a load balancer pool exists and creates one if it does not.
- :param lb_id: The loadbalancer id
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
- :return: (changed, group, pool_id) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- pool_id: The string id of the load balancer pool
- """
- changed = False
- result = port
- if not lb_id:
- return changed, None, None
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if not pool_id:
- if not self.module.check_mode:
- result = self.create_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- method=method,
- persistence=persistence,
- port=port)
- pool_id = result.get('id')
- changed = True
-
- return changed, result, pool_id
-
- def ensure_loadbalancer_absent(self, name, alias, location):
- """
- Checks to see if a load balancer exists and deletes it if it does
- :param name: Name of the load balancer
- :param alias: Alias of account
- :param location: Datacenter
- :return: (changed, result)
- changed: Boolean whether a change was made
- result: The result from the CLC API Call
- """
- changed = False
- result = name
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- if not self.module.check_mode:
- result = self.delete_loadbalancer(alias=alias,
- location=location,
- name=name)
- changed = True
- return changed, result
-
- def ensure_loadbalancerpool_absent(self, alias, location, name, port):
- """
- Checks to see if a load balancer pool exists and deletes it if it does
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer listens on
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = None
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed = True
- if not self.module.check_mode:
- result = self.delete_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id)
- else:
- result = "Pool doesn't exist"
- else:
- result = "LB Doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool
- and set the nodes if any in the list those doesn't exist
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: The list of nodes to be updated to the pool
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- result = {}
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_check=nodes)
- if not nodes_exist:
- changed = True
- result = self.set_loadbalancernodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be added
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.add_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_add=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool and removes them if found any
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be removed
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.remove_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_remove=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def create_loadbalancer(self, name, alias, location, description, status):
- """
- Create a loadbalancer w/ params
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description for loadbalancer to be created
- :param status: Enabled / Disabled
- :return: result: The result from the CLC API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call('POST',
- '/v2/sharedLoadBalancers/%s/%s' % (alias,
- location),
- json.dumps({"name": name,
- "description": description,
- "status": status}))
- sleep(1)
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def create_loadbalancerpool(
- self, alias, location, lb_id, method, persistence, port):
- """
- Creates a pool on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
- :return: result: The result from the create API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id), json.dumps(
- {
- "port": port, "method": method, "persistence": persistence
- }))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def delete_loadbalancer(self, alias, location, name):
- """
- Delete CLC loadbalancer
- :param alias: Alias for account
- :param location: Datacenter
- :param name: Name of the loadbalancer to delete
- :return: result: The result from the CLC API call
- """
- result = None
- lb_id = self._get_loadbalancer_id(name=name)
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
- """
- Delete the pool on the provided load balancer
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the load balancer pool
- :return: result: The result from the delete API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
- (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def _get_loadbalancer_id(self, name):
- """
- Retrieves unique ID of loadbalancer
- :param name: Name of loadbalancer
- :return: Unique ID of the loadbalancer
- """
- id = None
- for lb in self.lb_dict:
- if lb.get('name') == name:
- id = lb.get('id')
- return id
-
- def _get_loadbalancer_list(self, alias, location):
- """
- Retrieve a list of loadbalancers
- :param alias: Alias for account
- :param location: Datacenter
- :return: JSON data for all loadbalancers at datacenter
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch load balancers for account: {0}. {1}'.format(
- alias, str(e.response_text)))
- return result
-
- def _loadbalancer_exists(self, name):
- """
- Verify a loadbalancer exists
- :param name: Name of loadbalancer
- :return: False or the ID of the existing loadbalancer
- """
- result = False
-
- for lb in self.lb_dict:
- if lb.get('name') == name:
- result = lb.get('id')
- return result
-
- def _loadbalancerpool_exists(self, alias, location, port, lb_id):
- """
- Checks to see if a pool exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param port: the port to check and see if it exists
- :param lb_id: the id string of the provided load balancer
- :return: result: The id string of the pool or False
- """
- result = False
- try:
- pool_list = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to fetch the load balancer pools for for load balancer id: {0}. {1}'.format(
- lb_id, str(e.response_text)))
- for pool in pool_list:
- if int(pool.get('port')) == int(port):
- result = pool.get('id')
- return result
-
- def _loadbalancerpool_nodes_exists(
- self, alias, location, lb_id, pool_id, nodes_to_check):
- """
- Checks to see if a set of nodes exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the provided load balancer
- :param pool_id: the id string of the load balancer pool
- :param nodes_to_check: the list of nodes to check for
- :return: result: True / False indicating if the given nodes exist
- """
- result = False
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_check:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node in nodes:
- result = True
- else:
- result = False
- return result
-
- def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
- """
- Updates nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes: a list of dictionaries containing the nodes to set
- :return: result: The result from the CLC API call
- """
- result = None
- if not lb_id:
- return result
- if not self.module.check_mode:
- try:
- result = self.clc.v2.API.Call('PUT',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id), json.dumps(nodes))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
- """
- Add nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_add: a list of dictionaries containing the nodes to add
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_add:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node not in nodes:
- changed = True
- nodes.append(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
-
- def remove_lbpool_nodes(
- self, alias, location, lb_id, pool_id, nodes_to_remove):
- """
- Removes nodes from the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_remove: a list of dictionaries containing the nodes to remove
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_remove:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node in nodes:
- changed = True
- nodes.remove(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
-
- def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
- """
- Return the list of nodes available to the provided load balancer pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :return: result: The list of nodes
- """
- result = None
- try:
- result = self.clc.v2.API.Call('GET',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- @staticmethod
- def define_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- location=dict(required=True),
- alias=dict(required=True),
- port=dict(choices=[80, 443]),
- method=dict(choices=['leastConnection', 'roundRobin']),
- persistence=dict(choices=['standard', 'sticky']),
- nodes=dict(type='list', default=[], elements='dict'),
- status=dict(default='enabled', choices=['enabled', 'disabled']),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'port_absent',
- 'nodes_present',
- 'nodes_absent'])
- )
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
- supports_check_mode=True)
- clc_loadbalancer = ClcLoadBalancer(module)
- clc_loadbalancer.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_modify_server.py b/plugins/modules/clc_modify_server.py
deleted file mode 100644
index b375d9d47a..0000000000
--- a/plugins/modules/clc_modify_server.py
+++ /dev/null
@@ -1,975 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_modify_server
-short_description: Modify servers in CenturyLink Cloud
-description:
- - An Ansible module to modify servers in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - A list of server Ids to modify.
- type: list
- required: true
- elements: str
- cpu:
- description:
- - How many CPUs to update on the server
- type: str
- memory:
- description:
- - Memory (in GB) to set to the server.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti affinity policy id to be set for a hyper scale server.
- This is mutually exclusive with 'anti_affinity_policy_name'
- type: str
- anti_affinity_policy_name:
- description:
- - The anti affinity policy name to be set for a hyper scale server.
- This is mutually exclusive with 'anti_affinity_policy_id'
- type: str
- alert_policy_id:
- description:
- - The alert policy id to be associated to the server.
- This is mutually exclusive with 'alert_policy_name'
- type: str
- alert_policy_name:
- description:
- - The alert policy name to be associated to the server.
- This is mutually exclusive with 'alert_policy_id'
- type: str
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: true
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Set the cpu count to 4 on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 4
- state: present
-
-- name: Set the memory to 8GB on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- memory: 8
- state: present
-
-- name: Set the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: present
-
-- name: Remove the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: absent
-
-- name: Add the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: present
-
-- name: Remove the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: absent
-
-- name: Ret the memory to 16GB and cpu to 8 core on a lust if servers
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 8
- memory: 16
- state: present
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-servers:
- description: The list of server objects that are changed
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcModifyServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
-
- p = self.module.params
- cpu = p.get('cpu')
- memory = p.get('memory')
- state = p.get('state')
- if state == 'absent' and (cpu or memory):
- return self.module.fail_json(
- msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
-
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to modify: %s' %
- server_ids)
-
- (changed, server_dict_array, changed_server_ids) = self._modify_servers(
- server_ids=server_ids)
-
- self.module.exit_json(
- changed=changed,
- server_ids=changed_server_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- state=dict(default='present', choices=['present', 'absent']),
- cpu=dict(),
- memory=dict(),
- anti_affinity_policy_id=dict(),
- anti_affinity_policy_name=dict(),
- alert_policy_id=dict(),
- alert_policy_name=dict(),
- wait=dict(type='bool', default=True)
- )
- mutually_exclusive = [
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name']
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: the error message to throw in case of any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex.message)
-
- def _modify_servers(self, server_ids):
- """
- modify the servers configuration on the provided list
- :param server_ids: list of servers to modify
- :return: a list of dictionaries with server information about the servers that were modified
- """
- p = self.module.params
- state = p.get('state')
- server_params = {
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
- 'alert_policy_id': p.get('alert_policy_id'),
- 'alert_policy_name': p.get('alert_policy_name'),
- }
- changed = False
- server_changed = False
- aa_changed = False
- ap_changed = False
- server_dict_array = []
- result_server_ids = []
- request_list = []
- changed_servers = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return self.module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- for server in servers:
- if state == 'present':
- server_changed, server_result = self._ensure_server_config(
- server, server_params)
- if server_result:
- request_list.append(server_result)
- aa_changed = self._ensure_aa_policy_present(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_present(
- server,
- server_params)
- elif state == 'absent':
- aa_changed = self._ensure_aa_policy_absent(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_absent(
- server,
- server_params)
- if server_changed or aa_changed or ap_changed:
- changed_servers.append(server)
- changed = True
-
- self._wait_for_requests(self.module, request_list)
- self._refresh_servers(self.module, changed_servers)
-
- for server in changed_servers:
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- def _ensure_server_config(
- self, server, server_params):
- """
- ensures the server is updated with the provided cpu and memory
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- cpu = server_params.get('cpu')
- memory = server_params.get('memory')
- changed = False
- result = None
-
- if not cpu:
- cpu = server.cpu
- if not memory:
- memory = server.memory
- if memory != server.memory or cpu != server.cpu:
- if not self.module.check_mode:
- result = self._modify_clc_server(
- self.clc,
- self.module,
- server.id,
- cpu,
- memory)
- changed = True
- return changed, result
-
- @staticmethod
- def _modify_clc_server(clc, module, server_id, cpu, memory):
- """
- Modify the memory or CPU of a clc server.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param server_id: id of the server to modify
- :param cpu: the new cpu value
- :param memory: the new memory value
- :return: the result of CLC API call
- """
- result = None
- acct_alias = clc.v2.Account.GetAlias()
- try:
- # Update the server configuration
- job_obj = clc.v2.API.Call('PATCH',
- 'servers/%s/%s' % (acct_alias,
- server_id),
- json.dumps([{"op": "set",
- "member": "memory",
- "value": memory},
- {"op": "set",
- "member": "cpu",
- "value": cpu}]))
- result = clc.v2.Requests(job_obj)
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to update the server configuration for server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process modify server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- def _ensure_aa_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided anti affinity policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
-
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id != current_aa_policy_id:
- self._modify_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- aa_policy_id)
- changed = True
- return changed
-
- def _ensure_aa_policy_absent(
- self, server, server_params):
- """
- ensures the provided anti affinity policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id == current_aa_policy_id:
- self._delete_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id)
- changed = True
- return changed
-
- @staticmethod
- def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
- """
- modifies the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param aa_policy_id: the anti affinity policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('PUT',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({"id": aa_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _delete_aa_policy(clc, module, acct_alias, server_id):
- """
- Delete the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
- """
- retrieves the anti affinity policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(
- msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _get_aa_policy_id_of_server(clc, module, alias, server_id):
- """
- retrieves the anti affinity policy id of the server based on the CLC server id
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param server_id: the CLC server id
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- result = clc.v2.API.Call(
- method='GET', url='servers/%s/%s/antiAffinityPolicy' %
- (alias, server_id))
- aa_policy_id = result.get('id')
- except APIFailedResponse as ex:
- if ex.response_status_code != 404:
- module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return aa_policy_id
-
- def _ensure_alert_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided alert policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
- if alert_policy_id and not self._alert_policy_exists(
- server, alert_policy_id):
- self._add_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- def _ensure_alert_policy_absent(
- self, server, server_params):
- """
- ensures the alert policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
-
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
-
- if alert_policy_id and self._alert_policy_exists(
- server, alert_policy_id):
- self._remove_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
- add the alert policy to CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('POST',
- 'servers/%s/%s/alertPolicies' % (
- acct_alias,
- server_id),
- json.dumps({"id": alert_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _remove_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
- remove the alert policy to the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/alertPolicies/%s'
- % (acct_alias, server_id, alert_policy_id))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- retrieves the alert policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- try:
- alert_policies = clc.v2.API.Call(method='GET',
- url='alertPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for alert_policy in alert_policies.get('items'):
- if alert_policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = alert_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _alert_policy_exists(server, alert_policy_id):
- """
- Checks if the alert policy exists for the server
- :param server: the clc server object
- :param alert_policy_id: the alert policy
- :return: True: if the given alert policy id associated to the server, False otherwise
- """
- result = False
- alert_policies = server.alertPolicies
- if alert_policies:
- for alert_policy in alert_policies:
- if alert_policy.get('id') == alert_policy_id:
- result = True
- return result
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
-
- argument_dict = ClcModifyServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_modify_server = ClcModifyServer(module)
- clc_modify_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_publicip.py b/plugins/modules/clc_publicip.py
deleted file mode 100644
index c1bffcea04..0000000000
--- a/plugins/modules/clc_publicip.py
+++ /dev/null
@@ -1,369 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_publicip
-short_description: Add and Delete public ips on servers in CenturyLink Cloud
-description:
- - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- protocol:
- description:
- - The protocol that the public IP will listen for.
- type: str
- default: TCP
- choices: ['TCP', 'UDP', 'ICMP']
- ports:
- description:
- - A list of ports to expose. This is required when state is 'present'
- type: list
- elements: int
- server_ids:
- description:
- - A list of servers to create public ips on.
- type: list
- required: true
- elements: str
- state:
- description:
- - Determine whether to create or delete public IPs. If present module will not create a second public ip if one
- already exists.
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: true
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Add Public IP to Server
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create Public IP For Servers
- community.general.clc_publicip:
- protocol: TCP
- ports:
- - 80
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-- name: Delete Public IP from Server
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create Public IP For Servers
- community.general.clc_publicip:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcPublicIp(object):
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
- params = self.module.params
- server_ids = params['server_ids']
- ports = params['ports']
- protocol = params['protocol']
- state = params['state']
-
- if state == 'present':
- changed, changed_server_ids, requests = self.ensure_public_ip_present(
- server_ids=server_ids, protocol=protocol, ports=ports)
- elif state == 'absent':
- changed, changed_server_ids, requests = self.ensure_public_ip_absent(
- server_ids=server_ids)
- else:
- return self.module.fail_json(msg="Unknown State: " + state)
- self._wait_for_requests_to_complete(requests)
- return self.module.exit_json(changed=changed,
- server_ids=changed_server_ids)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
- ports=dict(type='list', elements='int'),
- wait=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- def ensure_public_ip_present(self, server_ids, protocol, ports):
- """
- Ensures the given server ids having the public ip available
- :param server_ids: the list of server ids
- :param protocol: the ip protocol
- :param ports: the list of ports to expose
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) == 0]
- ports_to_expose = [{'protocol': protocol, 'port': port}
- for port in ports]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._add_publicip_to_server(server, ports_to_expose)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
-
- def _add_publicip_to_server(self, server, ports_to_expose):
- result = None
- try:
- result = server.PublicIPs().Add(ports_to_expose)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_public_ip_absent(self, server_ids):
- """
- Ensures the given server ids having the public ip removed if there is any
- :param server_ids: the list of server ids
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) > 0]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._remove_publicip_from_server(server)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
-
- def _remove_publicip_from_server(self, server):
- result = None
- try:
- for ip_address in server.PublicIPs().public_ips:
- result = ip_address.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process public ip request')
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_ids, message):
- """
- Gets list of servers form CLC api
- """
- try:
- return self.clc.v2.Servers(server_ids).servers
- except CLCException as exception:
- self.module.fail_json(msg=message + ': %s' % exception)
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcPublicIp._define_module_argument_spec(),
- supports_check_mode=True
- )
- clc_public_ip = ClcPublicIp(module)
- clc_public_ip.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_server.py b/plugins/modules/clc_server.py
deleted file mode 100644
index 6bfe5a9b9e..0000000000
--- a/plugins/modules/clc_server.py
+++ /dev/null
@@ -1,1570 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_server
-short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- additional_disks:
- description:
- - The list of additional disks for the server
- type: list
- elements: dict
- default: []
- add_public_ip:
- description:
- - Whether to add a public ip to the server
- type: bool
- default: false
- alias:
- description:
- - The account alias to provision the servers under.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
- type: str
- anti_affinity_policy_name:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
- type: str
- alert_policy_id:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
- type: str
- alert_policy_name:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
- type: str
- count:
- description:
- - The number of servers to build (mutually exclusive with exact_count)
- default: 1
- type: int
- count_group:
- description:
- - Required when exact_count is specified. The Server Group use to determine how many servers to deploy.
- type: str
- cpu:
- description:
- - How many CPUs to provision on the server
- default: 1
- type: int
- cpu_autoscale_policy_id:
- description:
- - The autoscale policy to assign to the server.
- type: str
- custom_fields:
- description:
- - The list of custom fields to set on the server.
- type: list
- default: []
- elements: dict
- description:
- description:
- - The description to set for the server.
- type: str
- exact_count:
- description:
- - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group,
- creating and deleting them to reach that count. Requires count_group to be set.
- type: int
- group:
- description:
- - The Server Group to create servers under.
- type: str
- default: 'Default Group'
- ip_address:
- description:
- - The IP Address for the server. One is assigned if not provided.
- type: str
- location:
- description:
- - The Datacenter to create servers in.
- type: str
- managed_os:
- description:
- - Whether to create the server as 'Managed' or not.
- type: bool
- default: false
- required: false
- memory:
- description:
- - Memory in GB.
- type: int
- default: 1
- name:
- description:
- - A 1 to 6 character identifier to use for the server. This is required when state is 'present'
- type: str
- network_id:
- description:
- - The network UUID on which to create servers.
- type: str
- packages:
- description:
- - The list of blue print packages to run on the server after its created.
- type: list
- elements: dict
- default: []
- password:
- description:
- - Password for the administrator / root user
- type: str
- primary_dns:
- description:
- - Primary DNS used by the server.
- type: str
- public_ip_protocol:
- description:
- - The protocol to use for the public ip if add_public_ip is set to True.
- type: str
- default: 'TCP'
- choices: ['TCP', 'UDP', 'ICMP']
- public_ip_ports:
- description:
- - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
- type: list
- elements: dict
- default: []
- secondary_dns:
- description:
- - Secondary DNS used by the server.
- type: str
- server_ids:
- description:
- - Required for started, stopped, and absent states.
- A list of server Ids to insure are started, stopped, or absent.
- type: list
- default: []
- elements: str
- source_server_password:
- description:
- - The password for the source server if a clone is specified.
- type: str
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent', 'started', 'stopped']
- storage_type:
- description:
- - The type of storage to attach to the server.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale']
- template:
- description:
- - The template to use for server creation. Will search for a template if a partial string is provided.
- This is required when state is 'present'
- type: str
- ttl:
- description:
- - The time to live for the server in seconds. The server will be deleted when this time expires.
- type: str
- type:
- description:
- - The type of server to create.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale', 'bareMetal']
- configuration_id:
- description:
- - Only required for bare metal servers.
- Specifies the identifier for the specific configuration type of bare metal server to deploy.
- type: str
- os_type:
- description:
- - Only required for bare metal servers.
- Specifies the OS to provision with the bare metal server.
- type: str
- choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: true
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Provision a single Ubuntu Server
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- count: 1
- group: Default Group
- state: present
-
-- name: Ensure 'Default Group' has exactly 5 servers
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- exact_count: 5
- count_group: Default Group
- group: Default Group
-
-- name: Stop a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: stopped
-
-- name: Start a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: started
-
-- name: Delete a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: absent
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are created
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-partially_created_server_ids:
- description: The list of server ids that are partially created
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-servers:
- description: The list of server objects returned from CLC
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import time
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- changed = False
- new_server_ids = []
- server_dict_array = []
-
- self._set_clc_credentials_from_env()
- self.module.params = self._validate_module_params(
- self.clc,
- self.module)
- p = self.module.params
- state = p.get('state')
-
- #
- # Handle each state
- #
- partial_servers_ids = []
- if state == 'absent':
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to delete: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._delete_servers(module=self.module,
- clc=self.clc,
- server_ids=server_ids)
-
- elif state in ('started', 'stopped'):
- server_ids = p.get('server_ids')
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of servers to run: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._start_stop_servers(self.module,
- self.clc,
- server_ids)
-
- elif state == 'present':
- # Changed is always set to true when provisioning new instances
- if not p.get('template') and p.get('type') != 'bareMetal':
- return self.module.fail_json(
- msg='template parameter is required for new instance')
-
- if p.get('exact_count') is None:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._create_servers(self.module,
- self.clc)
- else:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._enforce_count(self.module,
- self.clc)
-
- self.module.exit_json(
- changed=changed,
- server_ids=new_server_ids,
- partially_created_server_ids=partial_servers_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- template=dict(),
- group=dict(default='Default Group'),
- network_id=dict(),
- location=dict(),
- cpu=dict(default=1, type='int'),
- memory=dict(default=1, type='int'),
- alias=dict(),
- password=dict(no_log=True),
- ip_address=dict(),
- storage_type=dict(
- default='standard',
- choices=[
- 'standard',
- 'hyperscale']),
- type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
- primary_dns=dict(),
- secondary_dns=dict(),
- additional_disks=dict(type='list', default=[], elements='dict'),
- custom_fields=dict(type='list', default=[], elements='dict'),
- ttl=dict(),
- managed_os=dict(type='bool', default=False),
- description=dict(),
- source_server_password=dict(no_log=True),
- cpu_autoscale_policy_id=dict(),
- anti_affinity_policy_id=dict(),
- anti_affinity_policy_name=dict(),
- alert_policy_id=dict(),
- alert_policy_name=dict(),
- packages=dict(type='list', default=[], elements='dict'),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'started',
- 'stopped']),
- count=dict(type='int', default=1),
- exact_count=dict(type='int', ),
- count_group=dict(),
- server_ids=dict(type='list', default=[], elements='str'),
- add_public_ip=dict(type='bool', default=False),
- public_ip_protocol=dict(
- default='TCP',
- choices=[
- 'TCP',
- 'UDP',
- 'ICMP']),
- public_ip_ports=dict(type='list', default=[], elements='dict'),
- configuration_id=dict(),
- os_type=dict(choices=[
- 'redHat6_64Bit',
- 'centOS6_64Bit',
- 'windows2012R2Standard_64Bit',
- 'ubuntu14_64Bit'
- ]),
- wait=dict(type='bool', default=True))
-
- mutually_exclusive = [
- ['exact_count', 'count'],
- ['exact_count', 'state'],
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name'],
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _validate_module_params(clc, module):
- """
- Validate the module params, and lookup default values.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: dictionary of validated params
- """
- params = module.params
- datacenter = ClcServer._find_datacenter(clc, module)
-
- ClcServer._validate_types(module)
- ClcServer._validate_name(module)
-
- params['alias'] = ClcServer._find_alias(clc, module)
- params['cpu'] = ClcServer._find_cpu(clc, module)
- params['memory'] = ClcServer._find_memory(clc, module)
- params['description'] = ClcServer._find_description(module)
- params['ttl'] = ClcServer._find_ttl(clc, module)
- params['template'] = ClcServer._find_template_id(module, datacenter)
- params['group'] = ClcServer._find_group(module, datacenter).id
- params['network_id'] = ClcServer._find_network_id(module, datacenter)
- params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
- clc,
- module)
- params['alert_policy_id'] = ClcServer._find_alert_policy_id(
- clc,
- module)
-
- return params
-
- @staticmethod
- def _find_datacenter(clc, module):
- """
- Find the datacenter by calling the CLC API.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Datacenter instance
- """
- location = module.params.get('location')
- try:
- if not location:
- account = clc.v2.Account()
- location = account.data.get('primaryDataCenter')
- data_center = clc.v2.Datacenter(location)
- return data_center
- except CLCException:
- module.fail_json(msg="Unable to find location: {0}".format(location))
-
- @staticmethod
- def _find_alias(clc, module):
- """
- Find or Validate the Account Alias by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Account instance
- """
- alias = module.params.get('alias')
- if not alias:
- try:
- alias = clc.v2.Account.GetAlias()
- except CLCException as ex:
- module.fail_json(msg='Unable to find account alias. {0}'.format(
- ex.message
- ))
- return alias
-
- @staticmethod
- def _find_cpu(clc, module):
- """
- Find or validate the CPU value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for CPU
- """
- cpu = module.params.get('cpu')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not cpu and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("cpu"):
- cpu = group.Defaults("cpu")
- else:
- module.fail_json(
- msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
- return cpu
-
- @staticmethod
- def _find_memory(clc, module):
- """
- Find or validate the Memory value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for Memory
- """
- memory = module.params.get('memory')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not memory and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("memory"):
- memory = group.Defaults("memory")
- else:
- module.fail_json(msg=str(
- "Can\'t determine a default memory value. Please provide a value for memory."))
- return memory
-
- @staticmethod
- def _find_description(module):
- """
- Set the description module param to name if description is blank
- :param module: the module to validate
- :return: string description
- """
- description = module.params.get('description')
- if not description:
- description = module.params.get('name')
- return description
-
- @staticmethod
- def _validate_types(module):
- """
- Validate that type and storage_type are set appropriately, and fail if not
- :param module: the module to validate
- :return: none
- """
- state = module.params.get('state')
- server_type = module.params.get(
- 'type').lower() if module.params.get('type') else None
- storage_type = module.params.get(
- 'storage_type').lower() if module.params.get('storage_type') else None
-
- if state == "present":
- if server_type == "standard" and storage_type not in (
- "standard", "premium"):
- module.fail_json(
- msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
-
- if server_type == "hyperscale" and storage_type != "hyperscale":
- module.fail_json(
- msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
-
- @staticmethod
- def _validate_name(module):
- """
- Validate that name is the correct length if provided, fail if it's not
- :param module: the module to validate
- :return: none
- """
- server_name = module.params.get('name')
- state = module.params.get('state')
-
- if state == 'present' and (
- len(server_name) < 1 or len(server_name) > 6):
- module.fail_json(msg=str(
- "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
-
- @staticmethod
- def _find_ttl(clc, module):
- """
- Validate that TTL is > 3600 if set, and fail if not
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: validated ttl
- """
- ttl = module.params.get('ttl')
-
- if ttl:
- if ttl <= 3600:
- return module.fail_json(msg=str("Ttl cannot be <= 3600"))
- else:
- ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
- return ttl
-
- @staticmethod
- def _find_template_id(module, datacenter):
- """
- Find the template id by calling the CLC API.
- :param module: the module to validate
- :param datacenter: the datacenter to search for the template
- :return: a valid clc template id
- """
- lookup_template = module.params.get('template')
- state = module.params.get('state')
- type = module.params.get('type')
- result = None
-
- if state == 'present' and type != 'bareMetal':
- try:
- result = datacenter.Templates().Search(lookup_template)[0].id
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a template: " +
- lookup_template +
- " in location: " +
- datacenter.id))
- return result
-
- @staticmethod
- def _find_network_id(module, datacenter):
- """
- Validate the provided network id or return a default.
- :param module: the module to validate
- :param datacenter: the datacenter to search for a network id
- :return: a valid network id
- """
- network_id = module.params.get('network_id')
-
- if not network_id:
- try:
- network_id = datacenter.Networks().networks[0].id
- # -- added for clc-sdk 2.23 compatibility
- # datacenter_networks = clc_sdk.v2.Networks(
- # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
- # network_id = datacenter_networks.networks[0].id
- # -- end
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a network in location: " +
- datacenter.id))
-
- return network_id
-
- @staticmethod
- def _find_aa_policy_id(clc, module):
- """
- Validate if the anti affinity policy exist for the given name and throw error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: aa_policy_id: the anti affinity policy id of the given name.
- """
- aa_policy_id = module.params.get('anti_affinity_policy_id')
- aa_policy_name = module.params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- alias = module.params.get('alias')
- aa_policy_id = ClcServer._get_anti_affinity_policy_id(
- clc,
- module,
- alias,
- aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _find_alert_policy_id(clc, module):
- """
- Validate if the alert policy exist for the given name and throw error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: alert_policy_id: the alert policy id of the given name.
- """
- alert_policy_id = module.params.get('alert_policy_id')
- alert_policy_name = module.params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alias = module.params.get('alias')
- alert_policy_id = ClcServer._get_alert_policy_id_by_name(
- clc=clc,
- module=module,
- alias=alias,
- alert_policy_name=alert_policy_name
- )
- if not alert_policy_id:
- module.fail_json(
- msg='No alert policy exist with name : %s' % alert_policy_name)
- return alert_policy_id
-
- def _create_servers(self, module, clc, override_count=None):
- """
- Create New Servers in CLC cloud
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created
- """
- p = module.params
- request_list = []
- servers = []
- server_dict_array = []
- created_server_ids = []
- partial_created_servers_ids = []
-
- add_public_ip = p.get('add_public_ip')
- public_ip_protocol = p.get('public_ip_protocol')
- public_ip_ports = p.get('public_ip_ports')
-
- params = {
- 'name': p.get('name'),
- 'template': p.get('template'),
- 'group_id': p.get('group'),
- 'network_id': p.get('network_id'),
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'alias': p.get('alias'),
- 'password': p.get('password'),
- 'ip_address': p.get('ip_address'),
- 'storage_type': p.get('storage_type'),
- 'type': p.get('type'),
- 'primary_dns': p.get('primary_dns'),
- 'secondary_dns': p.get('secondary_dns'),
- 'additional_disks': p.get('additional_disks'),
- 'custom_fields': p.get('custom_fields'),
- 'ttl': p.get('ttl'),
- 'managed_os': p.get('managed_os'),
- 'description': p.get('description'),
- 'source_server_password': p.get('source_server_password'),
- 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'packages': p.get('packages'),
- 'configuration_id': p.get('configuration_id'),
- 'os_type': p.get('os_type')
- }
-
- count = override_count if override_count else p.get('count')
-
- changed = False if count == 0 else True
-
- if not changed:
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
- for i in range(0, count):
- if not module.check_mode:
- req = self._create_clc_server(clc=clc,
- module=module,
- server_params=params)
- server = req.requests[0].Server()
- request_list.append(req)
- servers.append(server)
-
- self._wait_for_requests(module, request_list)
- self._refresh_servers(module, servers)
-
- ip_failed_servers = self._add_public_ip_to_servers(
- module=module,
- should_add_public_ip=add_public_ip,
- servers=servers,
- public_ip_protocol=public_ip_protocol,
- public_ip_ports=public_ip_ports)
- ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
- module=module,
- servers=servers)
-
- for server in servers:
- if server in ip_failed_servers or server in ap_failed_servers:
- partial_created_servers_ids.append(server.id)
- else:
- # reload server details
- server = clc.v2.Server(server.id)
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
-
- if add_public_ip and len(server.PublicIPs().public_ips) > 0:
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- created_server_ids.append(server.id)
- server_dict_array.append(server.data)
-
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
-
- def _enforce_count(self, module, clc):
- """
- Enforce that there is the right number of servers in the provided group.
- Starts or stops servers as necessary.
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created or deleted
- """
- p = module.params
- changed = False
- count_group = p.get('count_group')
- datacenter = ClcServer._find_datacenter(clc, module)
- exact_count = p.get('exact_count')
- server_dict_array = []
- partial_servers_ids = []
- changed_server_ids = []
-
- # fail here if the exact count was specified without filtering
- # on a group, as this may lead to a undesired removal of instances
- if exact_count and count_group is None:
- return module.fail_json(
- msg="you must use the 'count_group' option with exact_count")
-
- servers, running_servers = ClcServer._find_running_servers_by_group(
- module, datacenter, count_group)
-
- if len(running_servers) == exact_count:
- changed = False
-
- elif len(running_servers) < exact_count:
- to_create = exact_count - len(running_servers)
- server_dict_array, changed_server_ids, partial_servers_ids, changed \
- = self._create_servers(module, clc, override_count=to_create)
-
- for server in server_dict_array:
- running_servers.append(server)
-
- elif len(running_servers) > exact_count:
- to_remove = len(running_servers) - exact_count
- all_server_ids = sorted([x.id for x in running_servers])
- remove_ids = all_server_ids[0:to_remove]
-
- (changed, server_dict_array, changed_server_ids) \
- = ClcServer._delete_servers(module, clc, remove_ids)
-
- return server_dict_array, changed_server_ids, partial_servers_ids, changed
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- @staticmethod
- def _add_public_ip_to_servers(
- module,
- should_add_public_ip,
- servers,
- public_ip_protocol,
- public_ip_ports):
- """
- Create a public IP for servers
- :param module: the AnsibleModule object
- :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
- :param servers: List of servers to add public ips to
- :param public_ip_protocol: a protocol to allow for the public ips
- :param public_ip_ports: list of ports to allow for the public ips
- :return: none
- """
- failed_servers = []
- if not should_add_public_ip:
- return failed_servers
-
- ports_lst = []
- request_list = []
- server = None
-
- for port in public_ip_ports:
- ports_lst.append(
- {'protocol': public_ip_protocol, 'port': port})
- try:
- if not module.check_mode:
- for server in servers:
- request = server.PublicIPs().Add(ports_lst)
- request_list.append(request)
- except APIFailedResponse:
- failed_servers.append(server)
- ClcServer._wait_for_requests(module, request_list)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_servers(clc, module, servers):
- """
- Associate the alert policy to servers
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param servers: List of servers to add alert policy to
- :return: failed_servers: the list of servers which failed while associating alert policy
- """
- failed_servers = []
- p = module.params
- alert_policy_id = p.get('alert_policy_id')
- alias = p.get('alias')
-
- if alert_policy_id and not module.check_mode:
- for server in servers:
- try:
- ClcServer._add_alert_policy_to_server(
- clc=clc,
- alias=alias,
- server_id=server.id,
- alert_policy_id=alert_policy_id)
- except CLCException:
- failed_servers.append(server)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, alias, server_id, alert_policy_id):
- """
- Associate an alert policy to a clc server
- :param clc: the clc-sdk instance to use
- :param alias: the clc account alias
- :param server_id: The clc server id
- :param alert_policy_id: the alert policy id to be associated to the server
- :return: none
- """
- try:
- clc.v2.API.Call(
- method='POST',
- url='servers/%s/%s/alertPolicies' % (alias, server_id),
- payload=json.dumps(
- {
- 'id': alert_policy_id
- }))
- except APIFailedResponse as e:
- raise CLCException(
- 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
- server_id, str(e.response_text)))
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- Returns the alert policy id for the given alert policy name
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the clc account alias
- :param alert_policy_name: the name of the alert policy
- :return: alert_policy_id: the alert policy id
- """
- alert_policy_id = None
- policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
- if not policies:
- return alert_policy_id
- for policy in policies.get('items'):
- if policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _delete_servers(module, clc, server_ids):
- """
- Delete the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to delete
- :return: a list of dictionaries with server information about the servers that were deleted
- """
- terminated_server_ids = []
- server_dict_array = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if not module.check_mode:
- request_list.append(server.Delete())
- ClcServer._wait_for_requests(module, request_list)
-
- for server in servers:
- terminated_server_ids.append(server.id)
-
- return True, server_dict_array, terminated_server_ids
-
- @staticmethod
- def _start_stop_servers(module, clc, server_ids):
- """
- Start or Stop the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to start or stop
- :return: a list of dictionaries with server information about the servers that were started or stopped
- """
- p = module.params
- state = p.get('state')
- changed = False
- changed_servers = []
- server_dict_array = []
- result_server_ids = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if server.powerState != state:
- changed_servers.append(server)
- if not module.check_mode:
- request_list.append(
- ClcServer._change_server_power_state(
- module,
- server,
- state))
- changed = True
-
- ClcServer._wait_for_requests(module, request_list)
- ClcServer._refresh_servers(module, changed_servers)
-
- for server in set(changed_servers + servers):
- try:
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- except (KeyError, IndexError):
- pass
-
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- @staticmethod
- def _change_server_power_state(module, server, state):
- """
- Change the server powerState
- :param module: the module to check for intended state
- :param server: the server to start or stop
- :param state: the intended powerState for the server
- :return: the request object from clc-sdk call
- """
- result = None
- try:
- if state == 'started':
- result = server.PowerOn()
- else:
- # Try to shut down the server and fall back to power off when unable to shut down.
- result = server.ShutDown()
- if result and hasattr(result, 'requests') and result.requests[0]:
- return result
- else:
- result = server.PowerOff()
- except CLCException:
- module.fail_json(
- msg='Unable to change power state for server {0}'.format(
- server.id))
- return result
-
- @staticmethod
- def _find_running_servers_by_group(module, datacenter, count_group):
- """
- Find a list of running servers in the provided group
- :param module: the AnsibleModule object
- :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
- :param count_group: the group to count the servers
- :return: list of servers, and list of running servers
- """
- group = ClcServer._find_group(
- module=module,
- datacenter=datacenter,
- lookup_group=count_group)
-
- servers = group.Servers().Servers()
- running_servers = []
-
- for server in servers:
- if server.status == 'active' and server.powerState == 'started':
- running_servers.append(server)
-
- return servers, running_servers
-
- @staticmethod
- def _find_group(module, datacenter, lookup_group=None):
- """
- Find a server group in a datacenter by calling the CLC API
- :param module: the AnsibleModule instance
- :param datacenter: clc-sdk.Datacenter instance to search for the group
- :param lookup_group: string name of the group to search for
- :return: clc-sdk.Group instance
- """
- if not lookup_group:
- lookup_group = module.params.get('group')
- try:
- return datacenter.Groups().Get(lookup_group)
- except CLCException:
- pass
-
- # The search above only acts on the main
- result = ClcServer._find_group_recursive(
- module,
- datacenter.Groups(),
- lookup_group)
-
- if result is None:
- module.fail_json(
- msg=str(
- "Unable to find group: " +
- lookup_group +
- " in location: " +
- datacenter.id))
-
- return result
-
- @staticmethod
- def _find_group_recursive(module, group_list, lookup_group):
- """
- Find a server group by recursively walking the tree
- :param module: the AnsibleModule instance to use
- :param group_list: a list of groups to search
- :param lookup_group: the group to look for
- :return: list of groups
- """
- result = None
- for group in group_list.groups:
- subgroups = group.Subgroups()
- try:
- return subgroups.Get(lookup_group)
- except CLCException:
- result = ClcServer._find_group_recursive(
- module,
- subgroups,
- lookup_group)
-
- if result is not None:
- break
-
- return result
-
- @staticmethod
- def _create_clc_server(
- clc,
- module,
- server_params):
- """
- Call the CLC Rest API to Create a Server
- :param clc: the clc-python-sdk instance to use
- :param module: the AnsibleModule instance to use
- :param server_params: a dictionary of params to use to create the servers
- :return: clc-sdk.Request object linked to the queued server request
- """
-
- try:
- res = clc.v2.API.Call(
- method='POST',
- url='servers/%s' %
- (server_params.get('alias')),
- payload=json.dumps(
- {
- 'name': server_params.get('name'),
- 'description': server_params.get('description'),
- 'groupId': server_params.get('group_id'),
- 'sourceServerId': server_params.get('template'),
- 'isManagedOS': server_params.get('managed_os'),
- 'primaryDNS': server_params.get('primary_dns'),
- 'secondaryDNS': server_params.get('secondary_dns'),
- 'networkId': server_params.get('network_id'),
- 'ipAddress': server_params.get('ip_address'),
- 'password': server_params.get('password'),
- 'sourceServerPassword': server_params.get('source_server_password'),
- 'cpu': server_params.get('cpu'),
- 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
- 'memoryGB': server_params.get('memory'),
- 'type': server_params.get('type'),
- 'storageType': server_params.get('storage_type'),
- 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
- 'customFields': server_params.get('custom_fields'),
- 'additionalDisks': server_params.get('additional_disks'),
- 'ttl': server_params.get('ttl'),
- 'packages': server_params.get('packages'),
- 'configurationId': server_params.get('configuration_id'),
- 'osType': server_params.get('os_type')}))
-
- result = clc.v2.Requests(res)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
- server_params.get('name'),
- ex.response_text
- ))
-
- #
- # Patch the Request object so that it returns a valid server
-
- # Find the server's UUID from the API response
- server_uuid = [obj['id']
- for obj in res['links'] if obj['rel'] == 'self'][0]
-
- # Change the request server method to a _find_server_by_uuid closure so
- # that it will work
- result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
- clc,
- module,
- server_uuid,
- server_params.get('alias'))
-
- return result
-
- @staticmethod
- def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
- """
- retrieves the anti affinity policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
- alias, ex.response_text))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- #
- # This is the function that gets patched to the Request.server object using a lambda closure
- #
-
- @staticmethod
- def _find_server_by_uuid_w_retry(
- clc, module, svr_uuid, alias=None, retries=5, back_out=2):
- """
- Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param svr_uuid: UUID of the server
- :param retries: the number of retry attempts to make prior to fail. default is 5
- :param alias: the Account Alias to search
- :return: a clc-sdk.Server instance
- """
- if not alias:
- alias = clc.v2.Account.GetAlias()
-
- # Wait and retry if the api returns a 404
- while True:
- retries -= 1
- try:
- server_obj = clc.v2.API.Call(
- method='GET', url='servers/%s/%s?uuid=true' %
- (alias, svr_uuid))
- server_id = server_obj['id']
- server = clc.v2.Server(
- id=server_id,
- alias=alias,
- server_obj=server_obj)
- return server
-
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- return module.fail_json(
- msg='A failure response was received from CLC API when '
- 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
- (svr_uuid, e.response_status_code, e.message))
- if retries == 0:
- return module.fail_json(
- msg='Unable to reach the CLC API after 5 attempts')
- time.sleep(back_out)
- back_out *= 2
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_server = ClcServer(module)
- clc_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_server_snapshot.py b/plugins/modules/clc_server_snapshot.py
deleted file mode 100644
index 82b2a99568..0000000000
--- a/plugins/modules/clc_server_snapshot.py
+++ /dev/null
@@ -1,419 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_server_snapshot
-short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - The list of CLC server Ids.
- type: list
- required: true
- elements: str
- expiration_days:
- description:
- - The number of days to keep the server snapshot before it expires.
- type: int
- default: 7
- required: false
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- required: false
- choices: ['present', 'absent', 'restore']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- default: 'True'
- required: false
- type: str
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the
- Centurylink Cloud
- - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Create server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- expiration_days: 10
- wait: true
- state: present
-
-- name: Restore server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: true
- state: restore
-
-- name: Delete server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: true
- state: absent
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcSnapshot:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- server_ids = p['server_ids']
- expiration_days = p['expiration_days']
- state = p['state']
- request_list = []
- changed = False
- changed_servers = []
-
- self._set_clc_credentials_from_env()
- if state == 'present':
- changed, request_list, changed_servers = self.ensure_server_snapshot_present(
- server_ids=server_ids,
- expiration_days=expiration_days)
- elif state == 'absent':
- changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
- server_ids=server_ids)
- elif state == 'restore':
- changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
- server_ids=server_ids)
-
- self._wait_for_requests_to_complete(request_list)
- return self.module.exit_json(
- changed=changed,
- server_ids=changed_servers)
-
- def ensure_server_snapshot_present(self, server_ids, expiration_days):
- """
- Ensures the given set of server_ids have the snapshots created
- :param server_ids: The list of server_ids to create the snapshot
- :param expiration_days: The number of days to keep the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) == 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._create_server_snapshot(server, expiration_days)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _create_server_snapshot(self, server, expiration_days):
- """
- Create the snapshot for the CLC server
- :param server: the CLC server object
- :param expiration_days: The number of days to keep the snapshot
- :return: the create request object from CLC API Call
- """
- result = None
- try:
- result = server.CreateSnapshot(
- delete_existing=True,
- expiration_days=expiration_days)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_absent(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots removed
- :param server_ids: The list of server_ids to delete the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._delete_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _delete_server_snapshot(self, server):
- """
- Delete snapshot for the CLC server
- :param server: the CLC server object
- :return: the delete snapshot request object from CLC API
- """
- result = None
- try:
- result = server.DeleteSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_restore(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots restored
- :param server_ids: The list of server_ids to delete the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._restore_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _restore_server_snapshot(self, server):
- """
- Restore snapshot for the CLC server
- :param server: the CLC server object
- :return: the restore snapshot request object from CLC API
- """
- result = None
- try:
- result = server.RestoreSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process server snapshot request')
-
- @staticmethod
- def define_argument_spec():
- """
- This function defines the dictionary object required for
- package module
- :return: the package dictionary object
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- expiration_days=dict(default=7, type='int'),
- wait=dict(default=True),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'restore']),
- )
- return argument_spec
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: The error message to throw in case of any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcSnapshot.define_argument_spec(),
- supports_check_mode=True
- )
- clc_snapshot = ClcSnapshot(module)
- clc_snapshot.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py
index d8209cc61a..544a663e5c 100644
--- a/plugins/modules/cloud_init_data_facts.py
+++ b/plugins/modules/cloud_init_data_facts.py
@@ -8,12 +8,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: cloud_init_data_facts
short_description: Retrieve facts of cloud-init
description:
- - Gathers facts by reading the status.json and result.json of cloud-init.
+ - Gathers facts by reading the C(status.json) and C(result.json) of cloud-init.
author: René Moser (@resmo)
extends_documentation_fragment:
- community.general.attributes
@@ -22,14 +21,14 @@ extends_documentation_fragment:
options:
filter:
description:
- - Filter facts
+ - Filter facts.
type: str
- choices: [ status, result ]
+ choices: [status, result]
notes:
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather all facts of cloud init
community.general.cloud_init_data_facts:
register: result
@@ -44,47 +43,49 @@ EXAMPLES = '''
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
retries: 50
delay: 5
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
cloud_init_data_facts:
description: Facts of result and status.
returned: success
type: dict
- sample: '{
- "status": {
+ sample:
+ {
+ "status": {
"v1": {
- "datasource": "DataSourceCloudStack",
- "errors": []
- },
- "result": {
- "v1": {
- "datasource": "DataSourceCloudStack",
- "init": {
- "errors": [],
- "finished": 1522066377.0185432,
- "start": 1522066375.2648022
- },
- "init-local": {
- "errors": [],
- "finished": 1522066373.70919,
- "start": 1522066373.4726632
- },
- "modules-config": {
- "errors": [],
- "finished": 1522066380.9097016,
- "start": 1522066379.0011985
- },
- "modules-final": {
- "errors": [],
- "finished": 1522066383.56594,
- "start": 1522066382.3449218
- },
- "stage": null
+ "datasource": "DataSourceCloudStack",
+ "errors": []
}
- }'
-'''
+ },
+ "result": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "init": {
+ "errors": [],
+ "finished": 1522066377.0185432,
+ "start": 1522066375.2648022
+ },
+ "init-local": {
+ "errors": [],
+ "finished": 1522066373.70919,
+ "start": 1522066373.4726632
+ },
+ "modules-config": {
+ "errors": [],
+ "finished": 1522066380.9097016,
+ "start": 1522066379.0011985
+ },
+ "modules-final": {
+ "errors": [],
+ "finished": 1522066383.56594,
+ "start": 1522066382.3449218
+ },
+ "stage": null
+ }
+ }
+ }
+"""
import os
@@ -107,9 +108,8 @@ def gather_cloud_init_data_facts(module):
json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')
if os.path.exists(json_file):
- f = open(json_file, 'rb')
- contents = to_text(f.read(), errors='surrogate_or_strict')
- f.close()
+ with open(json_file, 'rb') as f:
+ contents = to_text(f.read(), errors='surrogate_or_strict')
if contents:
res['cloud_init_data_facts'][i] = module.from_json(contents)
diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py
index 3c0fe5f942..fafca00b50 100644
--- a/plugins/modules/cloudflare_dns.py
+++ b/plugins/modules/cloudflare_dns.py
@@ -8,16 +8,15 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: cloudflare_dns
author:
-- Michael Gruener (@mgruener)
+ - Michael Gruener (@mgruener)
short_description: Manage Cloudflare DNS records
description:
- - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)."
+ - 'Manages DNS records using the Cloudflare API, see the docs: U(https://api.cloudflare.com/).'
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -26,153 +25,163 @@ attributes:
options:
api_token:
description:
- - API token.
- - Required for api token authentication.
- - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
- - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
+ - API token.
+ - Required for API token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
+ - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
type: str
- required: false
version_added: '0.2.0'
account_api_key:
description:
- - Account API key.
- - Required for api keys authentication.
- - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
+ - Account API key.
+ - Required for API keys authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
type: str
- required: false
- aliases: [ account_api_token ]
+ aliases: [account_api_token]
account_email:
description:
- - Account email. Required for API keys authentication.
+ - Account email. Required for API keys authentication.
type: str
- required: false
algorithm:
description:
- - Algorithm number.
- - Required for O(type=DS) and O(type=SSHFP) when O(state=present).
+ - Algorithm number.
+ - Required for O(type=DS) and O(type=SSHFP) when O(state=present).
type: int
cert_usage:
description:
- - Certificate usage number.
- - Required for O(type=TLSA) when O(state=present).
+ - Certificate usage number.
+ - Required for O(type=TLSA) when O(state=present).
type: int
- choices: [ 0, 1, 2, 3 ]
+ choices: [0, 1, 2, 3]
+ comment:
+ description:
+ - Comments or notes about the DNS record.
+ type: str
+ version_added: 10.1.0
flag:
description:
- - Issuer Critical Flag.
- - Required for O(type=CAA) when O(state=present).
+ - Issuer Critical Flag.
+ - Required for O(type=CAA) when O(state=present).
type: int
- choices: [ 0, 1 ]
+ choices: [0, 1]
version_added: 8.0.0
tag:
description:
- - CAA issue restriction.
- - Required for O(type=CAA) when O(state=present).
+ - CAA issue restriction.
+ - Required for O(type=CAA) when O(state=present).
type: str
- choices: [ issue, issuewild, iodef ]
+ choices: [issue, issuewild, iodef]
version_added: 8.0.0
hash_type:
description:
- - Hash type number.
- - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present).
+ - Hash type number.
+ - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present).
type: int
- choices: [ 1, 2 ]
+ choices: [1, 2]
key_tag:
description:
- - DNSSEC key tag.
- - Needed for O(type=DS) when O(state=present).
+ - DNSSEC key tag.
+ - Needed for O(type=DS) when O(state=present).
type: int
port:
description:
- - Service port.
- - Required for O(type=SRV) and O(type=TLSA).
+ - Service port.
+ - Required for O(type=SRV) and O(type=TLSA).
type: int
priority:
description:
- - Record priority.
- - Required for O(type=MX) and O(type=SRV)
+ - Record priority.
+ - Required for O(type=MX) and O(type=SRV).
default: 1
type: int
proto:
description:
- - Service protocol. Required for O(type=SRV) and O(type=TLSA).
- - Common values are TCP and UDP.
+ - Service protocol. Required for O(type=SRV) and O(type=TLSA).
+ - Common values are TCP and UDP.
type: str
proxied:
description:
- - Proxy through Cloudflare network or just use DNS.
+ - Proxy through Cloudflare network or just use DNS.
type: bool
default: false
record:
description:
- - Record to add.
- - Required if O(state=present).
- - Default is V(@) (that is, the zone name).
+ - Record to add.
+ - Required if O(state=present).
+ - Default is V(@) (that is, the zone name).
type: str
default: '@'
- aliases: [ name ]
+ aliases: [name]
selector:
description:
- - Selector number.
- - Required for O(type=TLSA) when O(state=present).
- choices: [ 0, 1 ]
+ - Selector number.
+ - Required for O(type=TLSA) when O(state=present).
+ choices: [0, 1]
type: int
service:
description:
- - Record service.
- - Required for O(type=SRV).
+ - Record service.
+ - Required for O(type=SRV).
type: str
solo:
description:
- - Whether the record should be the only one for that record type and record name.
- - Only use with O(state=present).
- - This will delete all other records with the same record name and type.
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with O(state=present).
+ - This deletes all other records with the same record name and type.
type: bool
state:
description:
- - Whether the record(s) should exist or not.
+ - Whether the record(s) should exist or not.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
+ tags:
+ description:
+ - Custom tags for the DNS record.
+ type: list
+ elements: str
+ version_added: 10.1.0
timeout:
description:
- - Timeout for Cloudflare API calls.
+ - Timeout for Cloudflare API calls.
type: int
default: 30
ttl:
description:
- - The TTL to give the new record.
- - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ - The TTL to give the new record.
+ - Must be between V(120) and V(2,147,483,647) seconds, or V(1) for automatic.
type: int
default: 1
type:
description:
- The type of DNS record to create. Required if O(state=present).
- - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by CloudFlare.
+ - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by
+ CloudFlare.
+ - Support for V(PTR) has been added in community.general 11.1.0.
type: str
- choices: [ A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT ]
+ choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR]
value:
description:
- - The record value.
- - Required for O(state=present).
+ - The record value.
+ - Required for O(state=present).
type: str
- aliases: [ content ]
+ aliases: [content]
weight:
description:
- - Service weight.
- - Required for O(type=SRV).
+ - Service weight.
+ - Required for O(type=SRV).
type: int
default: 1
zone:
description:
- - The name of the Zone to work with (e.g. "example.com").
- - The Zone must already exist.
+ - The name of the Zone to work with (for example V(example.com)).
+ - The Zone must already exist.
type: str
required: true
- aliases: [ domain ]
-'''
+ aliases: [domain]
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a test.example.net A record to point to 127.0.0.1
community.general.cloudflare_dns:
zone: example.net
@@ -191,6 +200,18 @@ EXAMPLES = r'''
value: 127.0.0.1
api_token: dummyapitoken
+- name: Create a record with comment and tags
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ comment: Local test website
+ tags:
+ - test
+ - local
+ api_token: dummyapitoken
+
- name: Create a example.net CNAME record to example.com
community.general.cloudflare_dns:
zone: example.net
@@ -291,98 +312,132 @@ EXAMPLES = r'''
algorithm: 8
hash_type: 2
value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
-'''
-RETURN = r'''
+- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com"
+ community.general.cloudflare_dns:
+ zone: 2.0.192.in-addr.arpa
+ record: 1
+ type: PTR
+ value: test.example.com
+ state: present
+"""
+
+RETURN = r"""
record:
- description: A dictionary containing the record data.
- returned: success, except on record deletion
- type: complex
- contains:
- content:
- description: The record content (details depend on record type).
- returned: success
- type: str
- sample: 192.0.2.91
- created_on:
- description: The record creation date.
- returned: success
- type: str
- sample: "2016-03-25T19:09:42.516553Z"
- data:
- description: Additional record data.
- returned: success, if type is SRV, DS, SSHFP TLSA or CAA
- type: dict
- sample: {
- name: "jabber",
- port: 8080,
- priority: 10,
- proto: "_tcp",
- service: "_xmpp",
- target: "jabberhost.sample.com",
- weight: 5,
- }
- id:
- description: The record ID.
- returned: success
- type: str
- sample: f9efb0549e96abcb750de63b38c9576e
- locked:
- description: No documentation available.
- returned: success
- type: bool
- sample: false
- meta:
- description: No documentation available.
- returned: success
- type: dict
- sample: { auto_added: false }
- modified_on:
- description: Record modification date.
- returned: success
- type: str
- sample: "2016-03-25T19:09:42.516553Z"
- name:
- description: The record name as FQDN (including _service and _proto for SRV).
- returned: success
- type: str
- sample: www.sample.com
- priority:
- description: Priority of the MX record.
- returned: success, if type is MX
- type: int
- sample: 10
- proxiable:
- description: Whether this record can be proxied through Cloudflare.
- returned: success
- type: bool
- sample: false
- proxied:
- description: Whether the record is proxied through Cloudflare.
- returned: success
- type: bool
- sample: false
- ttl:
- description: The time-to-live for the record.
- returned: success
- type: int
- sample: 300
- type:
- description: The record type.
- returned: success
- type: str
- sample: A
- zone_id:
- description: The ID of the zone containing the record.
- returned: success
- type: str
- sample: abcede0bf9f0066f94029d2e6b73856a
- zone_name:
- description: The name of the zone containing the record.
- returned: success
- type: str
- sample: sample.com
-'''
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ comment:
+ description: Comments or notes about the DNS record.
+ returned: success
+ type: str
+ sample: Domain verification record
+ version_added: 10.1.0
+ comment_modified_on:
+ description: When the record comment was last modified. Omitted if there is no comment.
+ returned: success
+ type: str
+ sample: "2024-01-01T05:20:00.12345Z"
+ version_added: 10.1.0
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP TLSA or CAA
+ type: dict
+ sample:
+ {
+ "name": "jabber",
+ "port": 8080,
+ "priority": 10,
+ "proto": "_tcp",
+ "service": "_xmpp",
+ "target": "jabberhost.sample.com",
+ "weight": 5
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: false
+ meta:
+ description: Extra Cloudflare-specific information about the record.
+ returned: success
+ type: dict
+ sample: {"auto_added": false}
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: false
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: false
+ tags:
+ description: Custom tags for the DNS record.
+ returned: success
+ type: list
+ elements: str
+ sample: ["production", "app"]
+ version_added: 10.1.0
+ tags_modified_on:
+ description: When the record tags were last modified. Omitted if there are no tags.
+ returned: success
+ type: str
+ sample: "2025-01-01T05:20:00.12345Z"
+ version_added: 10.1.0
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+"""
import json
@@ -393,9 +448,11 @@ from ansible.module_utils.urls import fetch_url
def lowercase_string(param):
- if not isinstance(param, str):
- return param
- return param.lower()
+ return param.lower() if isinstance(param, str) else param
+
+
+def join_str(sep, *args):
+ return sep.join([str(arg) for arg in args])
class CloudflareAPI(object):
@@ -410,9 +467,11 @@ class CloudflareAPI(object):
self.account_email = module.params['account_email']
self.algorithm = module.params['algorithm']
self.cert_usage = module.params['cert_usage']
+ self.comment = module.params['comment']
self.hash_type = module.params['hash_type']
self.flag = module.params['flag']
self.tag = module.params['tag']
+ self.tags = module.params['tags']
self.key_tag = module.params['key_tag']
self.port = module.params['port']
self.priority = module.params['priority']
@@ -439,29 +498,29 @@ class CloudflareAPI(object):
if (self.type == 'AAAA') and (self.value is not None):
self.value = self.value.lower()
- if (self.type == 'SRV'):
+ if self.type == 'SRV':
if (self.proto is not None) and (not self.proto.startswith('_')):
- self.proto = '_' + self.proto
+ self.proto = '_{0}'.format(self.proto)
if (self.service is not None) and (not self.service.startswith('_')):
- self.service = '_' + self.service
+ self.service = '_{0}'.format(self.service)
- if (self.type == 'TLSA'):
+ if self.type == 'TLSA':
if (self.proto is not None) and (not self.proto.startswith('_')):
- self.proto = '_' + self.proto
+ self.proto = '_{0}'.format(self.proto)
if (self.port is not None):
- self.port = '_' + str(self.port)
+ self.port = '_{0}'.format(self.port)
if not self.record.endswith(self.zone):
- self.record = self.record + '.' + self.zone
+ self.record = join_str('.', self.record, self.zone)
- if (self.type == 'DS'):
+ if self.type == 'DS':
if self.record == self.zone:
self.module.fail_json(msg="DS records only apply to subdomains.")
def _cf_simple_api_call(self, api_call, method='GET', payload=None):
if self.api_token:
headers = {
- 'Authorization': 'Bearer ' + self.api_token,
+ 'Authorization': 'Bearer {0}'.format(self.api_token),
'Content-Type': 'application/json',
}
else:
@@ -561,7 +620,7 @@ class CloudflareAPI(object):
else:
raw_api_call = api_call
while next_page <= pagination['total_pages']:
- raw_api_call += '?' + '&'.join(parameters)
+ raw_api_call += '?{0}'.format('&'.join(parameters))
result, status = self._cf_simple_api_call(raw_api_call, method, payload)
data += result['result']
next_page += 1
@@ -586,8 +645,8 @@ class CloudflareAPI(object):
name = self.zone
param = ''
if name:
- param = '?' + urlencode({'name': name})
- zones, status = self._cf_api_call('/zones' + param)
+ param = '?{0}'.format(urlencode({'name': name}))
+ zones, status = self._cf_api_call('/zones{0}'.format(param))
return zones
def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
@@ -612,48 +671,40 @@ class CloudflareAPI(object):
if value:
query['content'] = value
if query:
- api_call += '?' + urlencode(query)
+ api_call += '?{0}'.format(urlencode(query))
records, status = self._cf_api_call(api_call)
return records
- def delete_dns_records(self, **kwargs):
- params = {}
- for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
- if param in kwargs:
- params[param] = kwargs[param]
- else:
- params[param] = getattr(self, param)
-
+ def delete_dns_records(self, solo):
records = []
- content = params['value']
- search_record = params['record']
- if params['type'] == 'SRV':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
- search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
- elif params['type'] == 'DS':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
- elif params['type'] == 'SSHFP':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'].upper()
- elif params['type'] == 'TLSA':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
- search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
- if params['solo']:
+ content = self.value
+ search_record = self.record
+ if self.type == 'SRV':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.weight, self.port, self.value)
+ search_record = join_str('.', self.service, self.proto, self.record)
+ elif self.type == 'DS':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value)
+ elif self.type == 'SSHFP':
+ if not (self.value is None or self.value == ''):
+ content = join_str(' ', self.algorithm, self.hash_type, self.value.upper())
+ elif self.type == 'TLSA':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value)
+ search_record = join_str('.', self.port, self.proto, self.record)
+ if solo:
search_value = None
else:
search_value = content
- zone_id = self._get_zone_id(params['zone'])
- records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ zone_id = self._get_zone_id(self.zone)
+ records = self.get_dns_records(self.zone, self.type, search_record, search_value)
for rr in records:
- if params['solo']:
- if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ if solo:
+ if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)):
self.changed = True
if not self.module.check_mode:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE')
@@ -663,153 +714,146 @@ class CloudflareAPI(object):
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE')
return self.changed
- def ensure_dns_record(self, **kwargs):
- params = {}
- for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
- if param in kwargs:
- params[param] = kwargs[param]
- else:
- params[param] = getattr(self, param)
-
- search_value = params['value']
- search_record = params['record']
+ def ensure_dns_record(self):
+ search_value = self.value
+ search_record = self.record
new_record = None
- if (params['type'] is None) or (params['record'] is None):
- self.module.fail_json(msg="You must provide a type and a record to create a new record")
- if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS']):
- if not params['value']:
+ if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']:
+ if not self.value:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
# there can only be one CNAME per record
# ignoring the value when searching for existing
# CNAME records allows us to update the value if it
# changes
- if params['type'] == 'CNAME':
+ if self.type == 'CNAME':
search_value = None
new_record = {
- "type": params['type'],
- "name": params['record'],
- "content": params['value'],
- "ttl": params['ttl']
+ "type": self.type,
+ "name": self.record,
+ "content": self.value,
+ "ttl": self.ttl
}
- if (params['type'] in ['A', 'AAAA', 'CNAME']):
- new_record["proxied"] = params["proxied"]
+ if self.type in ['A', 'AAAA', 'CNAME']:
+ new_record["proxied"] = self.proxied
- if params['type'] == 'MX':
- for attr in [params['priority'], params['value']]:
+ if self.type == 'MX':
+ for attr in [self.priority, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide priority and a value to create this record type")
new_record = {
- "type": params['type'],
- "name": params['record'],
- "content": params['value'],
- "priority": params['priority'],
- "ttl": params['ttl']
+ "type": self.type,
+ "name": self.record,
+ "content": self.value,
+ "priority": self.priority,
+ "ttl": self.ttl
}
- if params['type'] == 'SRV':
- for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if self.type == 'SRV':
+ for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
srv_data = {
- "target": params['value'],
- "port": params['port'],
- "weight": params['weight'],
- "priority": params['priority'],
+ "target": self.value,
+ "port": self.port,
+ "weight": self.weight,
+ "priority": self.priority,
}
new_record = {
- "type": params['type'],
- "name": params['service'] + '.' + params['proto'] + '.' + params['record'],
- "ttl": params['ttl'],
+ "type": self.type,
+ "name": join_str('.', self.service, self.proto, self.record),
+ "ttl": self.ttl,
'data': srv_data,
}
- search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
- search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ search_value = join_str('\t', self.weight, self.port, self.value)
+ search_record = join_str('.', self.service, self.proto, self.record)
- if params['type'] == 'DS':
- for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if self.type == 'DS':
+ for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
ds_data = {
- "key_tag": params['key_tag'],
- "algorithm": params['algorithm'],
- "digest_type": params['hash_type'],
- "digest": params['value'],
+ "key_tag": self.key_tag,
+ "algorithm": self.algorithm,
+ "digest_type": self.hash_type,
+ "digest": self.value,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': ds_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value)
- if params['type'] == 'SSHFP':
- for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if self.type == 'SSHFP':
+ for attr in [self.algorithm, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
sshfp_data = {
- "fingerprint": params['value'].upper(),
- "type": params['hash_type'],
- "algorithm": params['algorithm'],
+ "fingerprint": self.value.upper(),
+ "type": self.hash_type,
+ "algorithm": self.algorithm,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': sshfp_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value']
+ search_value = join_str(' ', self.algorithm, self.hash_type, self.value)
- if params['type'] == 'TLSA':
- for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if self.type == 'TLSA':
+ for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
- search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ search_record = join_str('.', self.port, self.proto, self.record)
tlsa_data = {
- "usage": params['cert_usage'],
- "selector": params['selector'],
- "matching_type": params['hash_type'],
- "certificate": params['value'],
+ "usage": self.cert_usage,
+ "selector": self.selector,
+ "matching_type": self.hash_type,
+ "certificate": self.value,
}
new_record = {
- "type": params['type'],
+ "type": self.type,
"name": search_record,
'data': tlsa_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value)
- if params['type'] == 'CAA':
- for attr in [params['flag'], params['tag'], params['value']]:
- if (attr is None) or (attr == ''):
+ if self.type == 'CAA':
+ for attr in [self.flag, self.tag, self.value]:
+ if attr == '':
self.module.fail_json(msg="You must provide flag, tag and a value to create this record type")
caa_data = {
- "flags": params['flag'],
- "tag": params['tag'],
- "value": params['value'],
+ "flags": self.flag,
+ "tag": self.tag,
+ "value": self.value,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': caa_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
search_value = None
- zone_id = self._get_zone_id(params['zone'])
- records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ new_record['comment'] = self.comment or None
+ new_record['tags'] = self.tags or []
+
+ zone_id = self._get_zone_id(self.zone)
+ records = self.get_dns_records(self.zone, self.type, search_record, search_value)
# in theory this should be impossible as cloudflare does not allow
# the creation of duplicate records but lets cover it anyways
if len(records) > 1:
# As Cloudflare API cannot filter record containing quotes
# CAA records must be compared locally
- if params['type'] == 'CAA':
+ if self.type == 'CAA':
for rr in records:
if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']:
return rr, self.changed
@@ -819,16 +863,20 @@ class CloudflareAPI(object):
if len(records) == 1:
cur_record = records[0]
do_update = False
- if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ if (self.ttl is not None) and (cur_record['ttl'] != self.ttl):
do_update = True
- if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority):
do_update = True
- if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied):
do_update = True
if ('data' in new_record) and ('data' in cur_record):
- if (cur_record['data'] != new_record['data']):
+ if cur_record['data'] != new_record['data']:
do_update = True
- if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if cur_record['comment'] != new_record['comment']:
+ do_update = True
+ if sorted(cur_record['tags']) != sorted(new_record['tags']):
do_update = True
if do_update:
if self.module.check_mode:
@@ -850,21 +898,18 @@ class CloudflareAPI(object):
def main():
module = AnsibleModule(
argument_spec=dict(
- api_token=dict(
- type="str",
- required=False,
- no_log=True,
- fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
- ),
- account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
- account_email=dict(type='str', required=False),
+ api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])),
+ account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str'),
algorithm=dict(type='int'),
cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ comment=dict(type='str'),
hash_type=dict(type='int', choices=[1, 2]),
key_tag=dict(type='int', no_log=False),
port=dict(type='int'),
flag=dict(type='int', choices=[0, 1]),
tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']),
+ tags=dict(type='list', elements='str'),
priority=dict(type='int', default=1),
proto=dict(type='str'),
proxied=dict(type='bool', default=False),
@@ -875,7 +920,7 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=30),
ttl=dict(type='int', default=1),
- type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']),
value=dict(type='str', aliases=['content']),
weight=dict(type='int', default=1),
zone=dict(type='str', required=True, aliases=['domain']),
@@ -884,20 +929,21 @@ def main():
required_if=[
('state', 'present', ['record', 'type', 'value']),
('state', 'absent', ['record']),
- ('type', 'SRV', ['proto', 'service']),
+ ('type', 'SRV', ['proto', 'service', 'value']),
('type', 'TLSA', ['proto', 'port']),
- ('type', 'CAA', ['flag', 'tag']),
+ ('type', 'CAA', ['flag', 'tag', 'value']),
+ ],
+ required_together=[
+ ('account_api_key', 'account_email'),
+ ],
+ required_one_of=[
+ ['api_token', 'account_api_key'],
],
)
- if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
- module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
if module.params['type'] == 'SRV':
- if not ((module.params['weight'] is not None and module.params['port'] is not None
- and not (module.params['value'] is None or module.params['value'] == ''))
- or (module.params['weight'] is None and module.params['port'] is None
- and (module.params['value'] is None or module.params['value'] == ''))):
- module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+ if not module.params['value'] == '':
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined.")
if module.params['type'] == 'SSHFP':
if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
@@ -914,11 +960,8 @@ def main():
module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
if module.params['type'] == 'CAA':
- if not ((module.params['flag'] is not None and module.params['tag'] is not None
- and not (module.params['value'] is None or module.params['value'] == ''))
- or (module.params['flag'] is None and module.params['tag'] is None
- and (module.params['value'] is None or module.params['value'] == ''))):
- module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.")
+ if not module.params['value'] == '':
+ module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined.")
if module.params['type'] == 'DS':
if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py
index 27f57028be..b1c92a1690 100644
--- a/plugins/modules/cobbler_sync.py
+++ b/plugins/modules/cobbler_sync.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: cobbler_sync
short_description: Sync Cobbler
description:
@@ -24,44 +23,44 @@ attributes:
options:
host:
description:
- - The name or IP address of the Cobbler system.
+ - The name or IP address of the Cobbler system.
default: 127.0.0.1
type: str
port:
description:
- - Port number to be used for REST connection.
- - The default value depends on parameter O(use_ssl).
+ - Port number to be used for REST connection.
+ - The default value depends on parameter O(use_ssl).
type: int
username:
description:
- - The username to log in to Cobbler.
+ - The username to log in to Cobbler.
default: cobbler
type: str
password:
description:
- - The password to log in to Cobbler.
+ - The password to log in to Cobbler.
type: str
use_ssl:
description:
- - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection is used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
- - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated.
+ - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
author:
-- Dag Wieers (@dagwieers)
+ - Dag Wieers (@dagwieers)
todo:
notes:
-- Concurrently syncing Cobbler is bound to fail with weird errors.
-- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
- More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
-'''
+ - Concurrently syncing Cobbler is bound to fail with weird errors.
+ - On Python 2.7.8 and older (such as RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Commit Cobbler changes
community.general.cobbler_sync:
host: cobbler01
@@ -69,11 +68,11 @@ EXAMPLES = r'''
password: MySuperSecureP4sswOrd
run_once: true
delegate_to: localhost
-'''
+"""
-RETURN = r'''
+RETURN = r"""
# Default return values
-'''
+"""
import ssl
diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py
index e97d42ffac..a1a400928e 100644
--- a/plugins/modules/cobbler_system.py
+++ b/plugins/modules/cobbler_system.py
@@ -8,12 +8,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: cobbler_system
short_description: Manage system objects in Cobbler
description:
- - Add, modify or remove systems in Cobbler
+ - Add, modify or remove systems in Cobbler.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -24,67 +23,67 @@ attributes:
options:
host:
description:
- - The name or IP address of the Cobbler system.
+ - The name or IP address of the Cobbler system.
default: 127.0.0.1
type: str
port:
description:
- - Port number to be used for REST connection.
- - The default value depends on parameter O(use_ssl).
+ - Port number to be used for REST connection.
+ - The default value depends on parameter O(use_ssl).
type: int
username:
description:
- - The username to log in to Cobbler.
+ - The username to log in to Cobbler.
default: cobbler
type: str
password:
description:
- - The password to log in to Cobbler.
+ - The password to log in to Cobbler.
type: str
use_ssl:
description:
- - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection is used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
- - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated.
+ - This should only set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
name:
description:
- - The system name to manage.
+ - The system name to manage.
type: str
properties:
description:
- - A dictionary with system properties.
+ - A dictionary with system properties.
type: dict
interfaces:
description:
- - A list of dictionaries containing interface options.
+ - A list of dictionaries containing interface options.
type: dict
sync:
description:
- - Sync on changes.
- - Concurrently syncing Cobbler is bound to fail.
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
type: bool
default: false
state:
description:
- - Whether the system should be present, absent or a query is made.
- choices: [ absent, present, query ]
+ - Whether the system should be present, absent or a query is made.
+ choices: [absent, present, query]
default: present
type: str
author:
-- Dag Wieers (@dagwieers)
+ - Dag Wieers (@dagwieers)
notes:
-- Concurrently syncing Cobbler is bound to fail with weird errors.
-- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
- More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
-'''
+ - Concurrently syncing Cobbler is bound to fail with weird errors.
+ - On Python 2.7.8 and older (such as RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure the system exists in Cobbler
community.general.cobbler_system:
host: cobbler01
@@ -93,7 +92,7 @@ EXAMPLES = r'''
name: myhost
properties:
profile: CentOS6-x86_64
- name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers: [2.3.4.5, 3.4.5.6]
name_servers_search: foo.com, bar.com
interfaces:
eth0:
@@ -139,18 +138,18 @@ EXAMPLES = r'''
name: myhost
state: absent
delegate_to: localhost
-'''
+"""
-RETURN = r'''
+RETURN = r"""
systems:
- description: List of systems
+ description: List of systems.
returned: O(state=query) and O(name) is not provided
type: list
system:
- description: (Resulting) information about the system we are working with
+ description: (Resulting) information about the system we are working with.
returned: when O(name) is provided
type: dict
-'''
+"""
import ssl
@@ -164,7 +163,6 @@ from ansible_collections.community.general.plugins.module_utils.datetime import
)
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
IFPROPS_MAPPING = dict(
bondingopts='bonding_opts',
bridgeopts='bridge_opts',
@@ -281,8 +279,8 @@ def main():
if system:
# Update existing entry
- system_id = None
- if LooseVersion(str(conn.version())) >= LooseVersion('3.4.0'):
+ system_id = ''
+ if LooseVersion(str(conn.version())) >= LooseVersion('3.4'):
system_id = conn.get_system_handle(name)
else:
system_id = conn.get_system_handle(name, token)
diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py
index 3d1c4a3465..735b4d2d36 100644
--- a/plugins/modules/composer.py
+++ b/plugins/modules/composer.py
@@ -9,115 +9,114 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: composer
author:
- - "Dimitrios Tydeas Mengidis (@dmtrs)"
- - "René Moser (@resmo)"
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
short_description: Dependency Manager for PHP
description:
- - >
- Composer is a tool for dependency management in PHP. It allows you to
- declare the dependent libraries your project needs and it will install
- them in your project for you.
+ - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs
+ and it installs them in your project for you.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- command:
- type: str
- description:
- - Composer command like "install", "update" and so on.
- default: install
- arguments:
- type: str
- description:
- - Composer arguments like required package, version and so on.
- default: ''
- executable:
- type: path
- description:
- - Path to PHP Executable on the remote host, if PHP is not in PATH.
- aliases: [ php_path ]
- working_dir:
- type: path
- description:
- - Directory of your project (see --working-dir). This is required when
- the command is not run globally.
- - Will be ignored if O(global_command=true).
- global_command:
- description:
- - Runs the specified command globally.
- type: bool
- default: false
- prefer_source:
- description:
- - Forces installation from package sources when possible (see --prefer-source).
- default: false
- type: bool
- prefer_dist:
- description:
- - Forces installation from package dist even for dev versions (see --prefer-dist).
- default: false
- type: bool
- no_dev:
- description:
- - Disables installation of require-dev packages (see --no-dev).
- default: true
- type: bool
- no_scripts:
- description:
- - Skips the execution of all scripts defined in composer.json (see --no-scripts).
- default: false
- type: bool
- no_plugins:
- description:
- - Disables all plugins (see --no-plugins).
- default: false
- type: bool
- optimize_autoloader:
- description:
- - Optimize autoloader during autoloader dump (see --optimize-autoloader).
- - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- - Recommended especially for production, but can take a bit of time to run.
- default: true
- type: bool
- classmap_authoritative:
- description:
- - Autoload classes from classmap only.
- - Implicitly enable optimize_autoloader.
- - Recommended especially for production, but can take a bit of time to run.
- default: false
- type: bool
- apcu_autoloader:
- description:
- - Uses APCu to cache found/not-found classes
- default: false
- type: bool
- ignore_platform_reqs:
- description:
- - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
- default: false
- type: bool
- composer_executable:
- type: path
- description:
- - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed.
- version_added: 3.2.0
+ command:
+ type: str
+ description:
+ - Composer command like V(install), V(update) and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ default: ''
+ executable:
+ type: path
+ description:
+ - Path to PHP executable on the remote host, if PHP is not in E(PATH).
+ aliases: [php_path]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see C(--working-dir)). This is required when the command is not run globally.
+ - This is ignored if O(global_command=true).
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see C(--prefer-source)).
+ default: false
+ type: bool
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see C(--prefer-dist)).
+ default: false
+ type: bool
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see C(--no-dev)).
+ default: true
+ type: bool
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see C(--no-scripts)).
+ default: false
+ type: bool
+ no_plugins:
+ description:
+ - Disables all plugins (see C(--no-plugins)).
+ default: false
+ type: bool
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see C(--optimize-autoloader)).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+ - Implicitly enable optimize_autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ apcu_autoloader:
+ description:
+ - Uses APCu to cache found/not-found classes.
+ default: false
+ type: bool
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill
+ these.
+ default: false
+ type: bool
+ composer_executable:
+ type: path
+ description:
+ - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed.
+ version_added: 3.2.0
requirements:
- - php
- - composer installed in bin path (recommended /usr/local/bin) or specified in O(composer_executable)
+ - php
+ - composer installed in bin path (recommended C(/usr/local/bin)) or specified in O(composer_executable)
notes:
- - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
-'''
+ - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress)
+ if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method
+ to avoid issues.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock
community.general.composer:
command: install
@@ -141,7 +140,7 @@ EXAMPLES = '''
command: require
global_command: true
arguments: my/package
-'''
+"""
import re
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py
index fe1a898835..9c36ba65f2 100644
--- a/plugins/modules/consul.py
+++ b/plugins/modules/consul.py
@@ -9,26 +9,21 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: consul
-short_description: Add, modify & delete services within a consul cluster
+short_description: Add, modify & delete services within a Consul cluster
description:
- - Registers services and checks for an agent with a consul cluster.
- A service is some process running on the agent node that should be advertised by
- consul's discovery mechanism. It may optionally supply a check definition,
- a periodic service test to notify the consul cluster of service's health.
- - "Checks may also be registered per node e.g. disk usage, or cpu usage and
- notify the health of the entire node to the cluster.
- Service level checks do not require a check name or id as these are derived
- by Consul from the Service name and id respectively by appending 'service:'
- Node level checks require a O(check_name) and optionally a O(check_id)."
- - Currently, there is no complete way to retrieve the script, interval or TTL
- metadata for a registered check. Without this metadata it is not possible to
- tell if the data supplied with ansible represents a change to a check. As a
- result this does not attempt to determine changes and will always report a
- changed occurred. An API method is planned to supply this metadata so at that
- stage change management will be added.
- - "See U(http://consul.io) for more details."
+ - Registers services and checks for an agent with a Consul cluster. A service is some process running on the agent node
+ that should be advertised by Consul's discovery mechanism. It may optionally supply a check definition, a periodic service
+ test to notify the Consul cluster of service's health.
+ - Checks may also be registered per node, for example disk usage, or cpu usage and notify the health of the entire node
+ to the cluster. Service level checks do not require a check name or ID as these are derived by Consul from the Service
+ name and ID respectively by appending V(service:). Node level checks require a O(check_name) and optionally a O(check_id).
+ - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
+ metadata it is not possible to tell if the data supplied with Ansible represents a change to a check. As a result this
+ does not attempt to determine changes and it always reports that a change occurred. An API method is planned to supply this
+ metadata so at that stage change management is to be added.
+ - See U(http://consul.io) for more details.
requirements:
- python-consul
- requests
@@ -41,143 +36,127 @@ attributes:
diff_mode:
support: none
options:
- state:
- type: str
- description:
- - Register or deregister the consul service, defaults to present.
- default: present
- choices: ['present', 'absent']
- service_name:
- type: str
- description:
- - Unique name for the service on a node, must be unique per node,
- required if registering a service. May be omitted if registering
- a node level check.
- service_id:
- type: str
- description:
- - The ID for the service, must be unique per node. If O(state=absent),
- defaults to the service name if supplied.
- host:
- type: str
- description:
- - Host of the consul agent defaults to localhost.
- default: localhost
- port:
- type: int
- description:
- - The port on which the consul agent is running.
- default: 8500
- scheme:
- type: str
- description:
- - The protocol scheme on which the consul agent is running.
- default: http
- validate_certs:
- description:
- - Whether to verify the TLS certificate of the consul agent.
- type: bool
- default: true
- notes:
- type: str
- description:
- - Notes to attach to check when registering it.
- service_port:
- type: int
- description:
- - The port on which the service is listening. Can optionally be supplied for
- registration of a service, that is if O(service_name) or O(service_id) is set.
- service_address:
- type: str
- description:
- - The address to advertise that the service will be listening on.
- This value will be passed as the C(address) parameter to Consul's
- C(/v1/agent/service/register) API method, so refer to the Consul API
- documentation for further details.
- tags:
- type: list
- elements: str
- description:
- - Tags that will be attached to the service registration.
- script:
- type: str
- description:
- - The script/command that will be run periodically to check the health of the service.
- - Requires O(interval) to be provided.
- - Mutually exclusive with O(ttl), O(tcp) and O(http).
- interval:
- type: str
- description:
- - The interval at which the service check will be run.
- This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
- - Required if one of the parameters O(script), O(http), or O(tcp) is specified.
- check_id:
- type: str
- description:
- - An ID for the service check. If O(state=absent), defaults to
- O(check_name). Ignored if part of a service definition.
- check_name:
- type: str
- description:
- - Name for the service check. Required if standalone, ignored if
- part of service definition.
- check_node:
- description:
- - Node name.
- # TODO: properly document!
- type: str
- check_host:
- description:
- - Host name.
- # TODO: properly document!
- type: str
- ttl:
- type: str
- description:
- - Checks can be registered with a TTL instead of a O(script) and O(interval)
- this means that the service will check in with the agent before the
- TTL expires. If it doesn't the check will be considered failed.
- Required if registering a check and the script an interval are missing
- Similar to the interval this is a number with a V(s) or V(m) suffix to
- signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
- - Mutually exclusive with O(script), O(tcp) and O(http).
- tcp:
- type: str
- description:
- - Checks can be registered with a TCP port. This means that consul
- will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
- The format is V(host:port), for example V(localhost:80).
- - Requires O(interval) to be provided.
- - Mutually exclusive with O(script), O(ttl) and O(http).
- version_added: '1.3.0'
- http:
- type: str
- description:
- - Checks can be registered with an HTTP endpoint. This means that consul
- will check that the http endpoint returns a successful HTTP status.
- - Requires O(interval) to be provided.
- - Mutually exclusive with O(script), O(ttl) and O(tcp).
- timeout:
- type: str
- description:
- - A custom HTTP check timeout. The consul default is 10 seconds.
- Similar to the interval this is a number with a V(s) or V(m) suffix to
- signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
- token:
- type: str
- description:
- - The token key identifying an ACL rule set. May be required to register services.
- ack_params_state_absent:
- type: bool
- description:
- - This parameter has no more effect and is deprecated. It will be removed in community.general 10.0.0.
-'''
+ state:
+ type: str
+ description:
+ - Register or deregister the Consul service, defaults to present.
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+ - Unique name for the service on a node, must be unique per node, required if registering a service. May be omitted
+ if registering a node level check.
+ service_id:
+ type: str
+ description:
+ - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+ - Host of the Consul agent defaults to localhost.
+ default: localhost
+ port:
+ type: int
+ description:
+ - The port on which the Consul agent is running.
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - The protocol scheme on which the Consul agent is running.
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the Consul agent.
+ type: bool
+ default: true
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(service_name)
+ or O(service_id) is set.
+ service_address:
+ type: str
+ description:
+ - The address to advertise that the service is listening on. This value is passed as the C(address) parameter to Consul's
+ C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
+ tags:
+ type: list
+ elements: str
+ description:
+ - Tags that are attached to the service registration.
+ script:
+ type: str
+ description:
+ - The script/command that is run periodically to check the health of the service.
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(ttl), O(tcp) and O(http).
+ interval:
+ type: str
+ description:
+ - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+ seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+ is V(10s).
+ - Required if one of the parameters O(script), O(http), or O(tcp) is specified.
+ check_id:
+ type: str
+ description:
+ - An ID for the service check. If O(state=absent), defaults to O(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - Name for the service check. Required if standalone, ignored if part of service definition.
+ check_node:
+ description:
+ - Node name.
+ type: str
+ check_host:
+ description:
+ - Host name.
+ type: str
+ ttl:
+ type: str
+ description:
+ - Checks can be registered with a TTL instead of a O(script) and O(interval); this means that the service checks in with
+ the agent before the TTL expires. If it does not, the check is considered failed. Required if registering a check and
+ the script and interval are missing. Similar to the interval this is a number with a V(s) or V(m) suffix to signify
+ the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+ example V(10) is equivalent to V(10s).
+ - Mutually exclusive with O(script), O(tcp) and O(http).
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port is
+ successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(script), O(ttl) and O(http).
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+ HTTP status.
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(script), O(ttl) and O(tcp).
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
+ or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
+ is used by default, for example V(10) is equivalent to V(10s).
+ token:
+ type: str
+ description:
+ - The token key identifying an ACL rule set. May be required to register services.
+"""
-EXAMPLES = '''
-- name: Register nginx service with the local consul agent
+EXAMPLES = r"""
+- name: Register nginx service with the local Consul agent
community.general.consul:
service_name: nginx
service_port: 80
@@ -243,7 +222,7 @@ EXAMPLES = '''
service_id: nginx
interval: 60s
http: http://localhost:80/morestatus
-'''
+"""
try:
import consul
@@ -598,11 +577,6 @@ def main():
timeout=dict(type='str'),
tags=dict(type='list', elements='str'),
token=dict(no_log=True),
- ack_params_state_absent=dict(
- type='bool',
- removed_in_version='10.0.0',
- removed_from_collection='community.general',
- ),
),
mutually_exclusive=[
('script', 'ttl', 'tcp', 'http'),
diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py
deleted file mode 100644
index 2d60af0625..0000000000
--- a/plugins/modules/consul_acl.py
+++ /dev/null
@@ -1,695 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015, Steve Gargan
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: consul_acl
-short_description: Manipulate Consul ACL keys and rules
-description:
- - Allows the addition, modification and deletion of ACL keys and associated
- rules in a consul cluster via the agent. For more details on using and
- configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
-author:
- - Steve Gargan (@sgargan)
- - Colin Nolan (@colin-nolan)
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-deprecated:
- removed_in: 10.0.0
- why: The legacy ACL system was removed from Consul.
- alternative: Use M(community.general.consul_token) and/or M(community.general.consul_policy) instead.
-options:
- mgmt_token:
- description:
- - a management token is required to manipulate the acl lists
- required: true
- type: str
- state:
- description:
- - whether the ACL pair should be present or absent
- required: false
- choices: ['present', 'absent']
- default: present
- type: str
- token_type:
- description:
- - the type of token that should be created
- choices: ['client', 'management']
- default: client
- type: str
- name:
- description:
- - the name that should be associated with the acl key, this is opaque
- to Consul
- required: false
- type: str
- token:
- description:
- - the token key identifying an ACL rule set. If generated by consul
- this will be a UUID
- required: false
- type: str
- rules:
- type: list
- elements: dict
- description:
- - rules that should be associated with a given token
- required: false
- host:
- description:
- - host of the consul agent defaults to localhost
- required: false
- default: localhost
- type: str
- port:
- type: int
- description:
- - the port on which the consul agent is running
- required: false
- default: 8500
- scheme:
- description:
- - the protocol scheme on which the consul agent is running
- required: false
- default: http
- type: str
- validate_certs:
- type: bool
- description:
- - whether to verify the tls certificate of the consul agent
- required: false
- default: true
-requirements:
- - python-consul
- - pyhcl
- - requests
-'''
-
-EXAMPLES = """
-- name: Create an ACL with rules
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- rules:
- - key: "foo"
- policy: read
- - key: "private/foo"
- policy: deny
-
-- name: Create an ACL with a specific token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- token: my-token
- rules:
- - key: "foo"
- policy: read
-
-- name: Update the rules associated to an ACL token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- token: some_client_token
- rules:
- - event: "bbq"
- policy: write
- - key: "foo"
- policy: read
- - key: "private"
- policy: deny
- - keyring: write
- - node: "hgs4"
- policy: write
- - operator: read
- - query: ""
- policy: write
- - service: "consul"
- policy: write
- - session: "standup"
- policy: write
-
-- name: Remove a token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
- state: absent
-"""
-
-RETURN = """
-token:
- description: the token associated to the ACL (the ACL's ID)
- returned: success
- type: str
- sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
-rules:
- description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
- Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
- returned: when O(state=present)
- type: dict
- sample: {
- "key": {
- "foo": {
- "policy": "write"
- },
- "bar": {
- "policy": "deny"
- }
- }
- }
-operation:
- description: the operation performed on the ACL
- returned: changed
- type: str
- sample: update
-"""
-
-
-try:
- import consul
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
-try:
- import hcl
- pyhcl_installed = True
-except ImportError:
- pyhcl_installed = False
-
-try:
- from requests.exceptions import ConnectionError
- has_requests = True
-except ImportError:
- has_requests = False
-
-from collections import defaultdict
-from ansible.module_utils.basic import to_text, AnsibleModule
-
-
-RULE_SCOPES = [
- "agent",
- "agent_prefix",
- "event",
- "event_prefix",
- "key",
- "key_prefix",
- "keyring",
- "node",
- "node_prefix",
- "operator",
- "query",
- "query_prefix",
- "service",
- "service_prefix",
- "session",
- "session_prefix",
-]
-
-MANAGEMENT_PARAMETER_NAME = "mgmt_token"
-HOST_PARAMETER_NAME = "host"
-SCHEME_PARAMETER_NAME = "scheme"
-VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
-NAME_PARAMETER_NAME = "name"
-PORT_PARAMETER_NAME = "port"
-RULES_PARAMETER_NAME = "rules"
-STATE_PARAMETER_NAME = "state"
-TOKEN_PARAMETER_NAME = "token"
-TOKEN_TYPE_PARAMETER_NAME = "token_type"
-
-PRESENT_STATE_VALUE = "present"
-ABSENT_STATE_VALUE = "absent"
-
-CLIENT_TOKEN_TYPE_VALUE = "client"
-MANAGEMENT_TOKEN_TYPE_VALUE = "management"
-
-REMOVE_OPERATION = "remove"
-UPDATE_OPERATION = "update"
-CREATE_OPERATION = "create"
-
-_POLICY_JSON_PROPERTY = "policy"
-_RULES_JSON_PROPERTY = "Rules"
-_TOKEN_JSON_PROPERTY = "ID"
-_TOKEN_TYPE_JSON_PROPERTY = "Type"
-_NAME_JSON_PROPERTY = "Name"
-_POLICY_YML_PROPERTY = "policy"
-_POLICY_HCL_PROPERTY = "policy"
-
-_ARGUMENT_SPEC = {
- MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
- HOST_PARAMETER_NAME: dict(default='localhost'),
- SCHEME_PARAMETER_NAME: dict(default='http'),
- VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
- NAME_PARAMETER_NAME: dict(),
- PORT_PARAMETER_NAME: dict(default=8500, type='int'),
- RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
- STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
- TOKEN_PARAMETER_NAME: dict(no_log=False),
- TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
- default=CLIENT_TOKEN_TYPE_VALUE)
-}
-
-
-def set_acl(consul_client, configuration):
- """
- Sets an ACL based on the given configuration.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of setting the ACL
- """
- acls_as_json = decode_acls_as_json(consul_client.acl.list())
- existing_acls_mapped_by_name = {acl.name: acl for acl in acls_as_json if acl.name is not None}
- existing_acls_mapped_by_token = {acl.token: acl for acl in acls_as_json}
- if None in existing_acls_mapped_by_token:
- raise AssertionError("expecting ACL list to be associated to a token: %s" %
- existing_acls_mapped_by_token[None])
-
- if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
- # No token but name given so can get token from name
- configuration.token = existing_acls_mapped_by_name[configuration.name].token
-
- if configuration.token and configuration.token in existing_acls_mapped_by_token:
- return update_acl(consul_client, configuration)
- else:
- if configuration.token in existing_acls_mapped_by_token:
- raise AssertionError()
- if configuration.name in existing_acls_mapped_by_name:
- raise AssertionError()
- return create_acl(consul_client, configuration)
-
-
-def update_acl(consul_client, configuration):
- """
- Updates an ACL.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of the update
- """
- existing_acl = load_acl_with_token(consul_client, configuration.token)
- changed = existing_acl.rules != configuration.rules
-
- if changed:
- name = configuration.name if configuration.name is not None else existing_acl.name
- rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
- updated_token = consul_client.acl.update(
- configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
- if updated_token != configuration.token:
- raise AssertionError()
-
- return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
-
-
-def create_acl(consul_client, configuration):
- """
- Creates an ACL.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of the creation
- """
- rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
- token = consul_client.acl.create(
- name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
- rules = configuration.rules
- return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
-
-
-def remove_acl(consul, configuration):
- """
- Removes an ACL.
- :param consul: the consul client
- :param configuration: the run configuration
- :return: the output of the removal
- """
- token = configuration.token
- changed = consul.acl.info(token) is not None
- if changed:
- consul.acl.destroy(token)
- return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
-
-
-def load_acl_with_token(consul, token):
- """
- Loads the ACL with the given token (token == rule ID).
- :param consul: the consul client
- :param token: the ACL "token"/ID (not name)
- :return: the ACL associated to the given token
- :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
- """
- acl_as_json = consul.acl.info(token)
- if acl_as_json is None:
- raise ConsulACLNotFoundException(token)
- return decode_acl_as_json(acl_as_json)
-
-
-def encode_rules_as_hcl_string(rules):
- """
- Converts the given rules into the equivalent HCL (string) representation.
- :param rules: the rules
- :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal
- note for justification)
- """
- if len(rules) == 0:
- # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty
- # string if there is no rules...
- return None
- rules_as_hcl = ""
- for rule in rules:
- rules_as_hcl += encode_rule_as_hcl_string(rule)
- return rules_as_hcl
-
-
-def encode_rule_as_hcl_string(rule):
- """
- Converts the given rule into the equivalent HCL (string) representation.
- :param rule: the rule
- :return: the equivalent HCL (string) representation of the rule
- """
- if rule.pattern is not None:
- return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
- else:
- return '%s = "%s"\n' % (rule.scope, rule.policy)
-
-
-def decode_rules_as_hcl_string(rules_as_hcl):
- """
- Converts the given HCL (string) representation of rules into a list of rule domain models.
- :param rules_as_hcl: the HCL (string) representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules_as_hcl = to_text(rules_as_hcl)
- rules_as_json = hcl.loads(rules_as_hcl)
- return decode_rules_as_json(rules_as_json)
-
-
-def decode_rules_as_json(rules_as_json):
- """
- Converts the given JSON representation of rules into a list of rule domain models.
- :param rules_as_json: the JSON representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules = RuleCollection()
- for scope in rules_as_json:
- if not isinstance(rules_as_json[scope], dict):
- rules.add(Rule(scope, rules_as_json[scope]))
- else:
- for pattern, policy in rules_as_json[scope].items():
- rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
- return rules
-
-
-def encode_rules_as_json(rules):
- """
- Converts the given rules into the equivalent JSON representation according to the documentation:
- https://www.consul.io/docs/guides/acl.html#rule-specification.
- :param rules: the rules
- :return: JSON representation of the given rules
- """
- rules_as_json = defaultdict(dict)
- for rule in rules:
- if rule.pattern is not None:
- if rule.pattern in rules_as_json[rule.scope]:
- raise AssertionError()
- rules_as_json[rule.scope][rule.pattern] = {
- _POLICY_JSON_PROPERTY: rule.policy
- }
- else:
- if rule.scope in rules_as_json:
- raise AssertionError()
- rules_as_json[rule.scope] = rule.policy
- return rules_as_json
-
-
-def decode_rules_as_yml(rules_as_yml):
- """
- Converts the given YAML representation of rules into a list of rule domain models.
- :param rules_as_yml: the YAML representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules = RuleCollection()
- if rules_as_yml:
- for rule_as_yml in rules_as_yml:
- rule_added = False
- for scope in RULE_SCOPES:
- if scope in rule_as_yml:
- if rule_as_yml[scope] is None:
- raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
- policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
- else rule_as_yml[scope]
- pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
- rules.add(Rule(scope, policy, pattern))
- rule_added = True
- break
- if not rule_added:
- raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
- return rules
-
-
-def decode_acl_as_json(acl_as_json):
- """
- Converts the given JSON representation of an ACL into the equivalent domain model.
- :param acl_as_json: the JSON representation of an ACL
- :return: the equivalent domain model to the given ACL
- """
- rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
- rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
- else RuleCollection()
- return ACL(
- rules=rules,
- token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
- token=acl_as_json[_TOKEN_JSON_PROPERTY],
- name=acl_as_json[_NAME_JSON_PROPERTY]
- )
-
-
-def decode_acls_as_json(acls_as_json):
- """
- Converts the given JSON representation of ACLs into a list of ACL domain models.
- :param acls_as_json: the JSON representation of a collection of ACLs
- :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
- """
- return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
-
-
-class ConsulACLNotFoundException(Exception):
- """
- Exception raised if an ACL with is not found.
- """
-
-
-class Configuration:
- """
- Configuration for this module.
- """
-
- def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
- rules=None, state=None, token=None, token_type=None):
- self.management_token = management_token # type: str
- self.host = host # type: str
- self.scheme = scheme # type: str
- self.validate_certs = validate_certs # type: bool
- self.name = name # type: str
- self.port = port # type: int
- self.rules = rules # type: RuleCollection
- self.state = state # type: str
- self.token = token # type: str
- self.token_type = token_type # type: str
-
-
-class Output:
- """
- Output of an action of this module.
- """
-
- def __init__(self, changed=None, token=None, rules=None, operation=None):
- self.changed = changed # type: bool
- self.token = token # type: str
- self.rules = rules # type: RuleCollection
- self.operation = operation # type: str
-
-
-class ACL:
- """
- Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
- """
-
- def __init__(self, rules, token_type, token, name):
- self.rules = rules
- self.token_type = token_type
- self.token = token
- self.name = name
-
- def __eq__(self, other):
- return other \
- and isinstance(other, self.__class__) \
- and self.rules == other.rules \
- and self.token_type == other.token_type \
- and self.token == other.token \
- and self.name == other.name
-
- def __hash__(self):
- return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
-
-
-class Rule:
- """
- ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
- """
-
- def __init__(self, scope, policy, pattern=None):
- self.scope = scope
- self.policy = policy
- self.pattern = pattern
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) \
- and self.scope == other.scope \
- and self.policy == other.policy \
- and self.pattern == other.pattern
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
-
- def __str__(self):
- return encode_rule_as_hcl_string(self)
-
-
-class RuleCollection:
- """
- Collection of ACL rules, which are part of a Consul ACL.
- """
-
- def __init__(self):
- self._rules = {}
- for scope in RULE_SCOPES:
- self._rules[scope] = {}
-
- def __iter__(self):
- all_rules = []
- for scope, pattern_keyed_rules in self._rules.items():
- for pattern, rule in pattern_keyed_rules.items():
- all_rules.append(rule)
- return iter(all_rules)
-
- def __len__(self):
- count = 0
- for scope in RULE_SCOPES:
- count += len(self._rules[scope])
- return count
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) \
- and set(self) == set(other)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __str__(self):
- return encode_rules_as_hcl_string(self)
-
- def add(self, rule):
- """
- Adds the given rule to this collection.
- :param rule: model of a rule
- :raises ValueError: raised if there already exists a rule for a given scope and pattern
- """
- if rule.pattern in self._rules[rule.scope]:
- patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
- raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info))
- self._rules[rule.scope][rule.pattern] = rule
-
-
-def get_consul_client(configuration):
- """
- Gets a Consul client for the given configuration.
-
- Does not check if the Consul client can connect.
- :param configuration: the run configuration
- :return: Consul client
- """
- token = configuration.management_token
- if token is None:
- token = configuration.token
- if token is None:
- raise AssertionError("Expecting the management token to always be set")
- return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
- verify=configuration.validate_certs, token=token)
-
-
-def check_dependencies():
- """
- Checks that the required dependencies have been imported.
- :exception ImportError: if it is detected that any of the required dependencies have not been imported
- """
- if not python_consul_installed:
- raise ImportError("python-consul required for this module. "
- "See: https://python-consul.readthedocs.io/en/latest/#installation")
-
- if not pyhcl_installed:
- raise ImportError("pyhcl required for this module. "
- "See: https://pypi.org/project/pyhcl/")
-
- if not has_requests:
- raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
-
-
-def main():
- """
- Main method.
- """
- module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
-
- try:
- check_dependencies()
- except ImportError as e:
- module.fail_json(msg=str(e))
-
- configuration = Configuration(
- management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
- host=module.params.get(HOST_PARAMETER_NAME),
- scheme=module.params.get(SCHEME_PARAMETER_NAME),
- validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
- name=module.params.get(NAME_PARAMETER_NAME),
- port=module.params.get(PORT_PARAMETER_NAME),
- rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
- state=module.params.get(STATE_PARAMETER_NAME),
- token=module.params.get(TOKEN_PARAMETER_NAME),
- token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
- )
- consul_client = get_consul_client(configuration)
-
- try:
- if configuration.state == PRESENT_STATE_VALUE:
- output = set_acl(consul_client, configuration)
- else:
- output = remove_acl(consul_client, configuration)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- configuration.host, configuration.port, str(e)))
- raise
-
- return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
- if output.rules is not None:
- return_values["rules"] = encode_rules_as_json(output.rules)
- module.exit_json(**return_values)
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py
index bf1da110bf..ba6adf2dd3 100644
--- a/plugins/modules/consul_acl_bootstrap.py
+++ b/plugins/modules/consul_acl_bootstrap.py
@@ -9,13 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_acl_bootstrap
short_description: Bootstrap ACLs in Consul
version_added: 8.3.0
description:
- - Allows bootstrapping of ACLs in a Consul cluster, see
- U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls) for details.
+ - Allows bootstrapping of ACLs in a Consul cluster, see U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls)
+ for details.
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
@@ -40,35 +40,33 @@ options:
type: str
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Bootstrap the ACL system
community.general.consul_acl_bootstrap:
bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387
"""
-RETURN = """
+RETURN = r"""
result:
- description:
- - The bootstrap result as returned by the consul HTTP API.
- - "B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and
- C(ID) will not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
- If you pass O(bootstrap_secret), make sure your playbook/role does not depend
- on this return value!"
- returned: changed
- type: dict
- sample:
- AccessorID: 834a5881-10a9-a45b-f63c-490e28743557
- CreateIndex: 25
- CreateTime: '2024-01-21T20:26:27.114612038+01:00'
- Description: Bootstrap Token (Global Management)
- Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY=
- ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
- Local: false
- ModifyIndex: 25
- Policies:
+ description:
+ - The bootstrap result as returned by the Consul HTTP API.
+ - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
+ If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value!
+ returned: changed
+ type: dict
+ sample:
+ AccessorID: 834a5881-10a9-a45b-f63c-490e28743557
+ CreateIndex: 25
+ CreateTime: '2024-01-21T20:26:27.114612038+01:00'
+ Description: Bootstrap Token (Global Management)
+ Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY=
+ ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ Local: false
+ ModifyIndex: 25
+ Policies:
- ID: 00000000-0000-0000-0000-000000000001
Name: global-management
- SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py
index 3739260049..51d9715e88 100644
--- a/plugins/modules/consul_agent_check.py
+++ b/plugins/modules/consul_agent_check.py
@@ -9,20 +9,17 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: consul_agent_check
-short_description: Add, modify, and delete checks within a consul cluster
+short_description: Add, modify, and delete checks within a Consul cluster
version_added: 9.1.0
description:
- - Allows the addition, modification and deletion of checks in a consul
- cluster via the agent. For more details on using and configuring Checks,
- see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
- - Currently, there is no complete way to retrieve the script, interval or TTL
- metadata for a registered check. Without this metadata it is not possible to
- tell if the data supplied with ansible represents a change to a check. As a
- result this does not attempt to determine changes and will always report a
- changed occurred. An API method is planned to supply this metadata so at that
- stage change management will be added.
+ - Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using
+ and configuring Checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
+ - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
+ metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result, the
+ module does not attempt to determine changes and it always reports a changed occurred. An API method is planned to supply
+ this metadata so at that stage change management is to be added.
author:
- Michael Ilg (@Ilgmi)
extends_documentation_fragment:
@@ -34,13 +31,13 @@ attributes:
check_mode:
support: full
details:
- - The result is the object as it is defined in the module options and not the object structure of the consul API.
- For a better overview of what the object structure looks like,
- take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks).
+ - The result is the object as it is defined in the module options and not the object structure of the Consul API. For
+ a better overview of what the object structure looks like, take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks).
diff_mode:
support: partial
details:
- - In check mode the diff will show the object as it is defined in the module options and not the object structure of the consul API.
+ - In check mode the diff shows the object as it is defined in the module options and not the object structure of the
+ Consul API.
options:
state:
description:
@@ -50,18 +47,18 @@ options:
type: str
name:
description:
- - Required name for the service check.
+ - Required name for the service check.
type: str
id:
description:
- - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary to provide
- an ID for uniqueness. This value will return in the response as "CheckId".
+ - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary
+ to provide an ID for uniqueness. This value is returned in the response as V(CheckId).
type: str
interval:
description:
- - The interval at which the service check will be run.
- This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
+ - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+ seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+ is equivalent to V(10s).
- Required if one of the parameters O(args), O(http), or O(tcp) is specified.
type: str
notes:
@@ -77,46 +74,41 @@ options:
elements: str
ttl:
description:
- - Checks can be registered with a TTL instead of a O(args) and O(interval)
- this means that the service will check in with the agent before the
- TTL expires. If it doesn't the check will be considered failed.
- Required if registering a check and the script an interval are missing
- Similar to the interval this is a number with a V(s) or V(m) suffix to
- signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
+ - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service checks in with
+ the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and
+ the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix to signify
+ the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+ example V(10) is equivalent to V(10s).
- Mutually exclusive with O(args), O(tcp) and O(http).
type: str
tcp:
description:
- - Checks can be registered with a TCP port. This means that consul
- will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
- The format is V(host:port), for example V(localhost:80).
+ - Checks can be registered with a TCP port. This means that Consul will check if the connection attempt to that port
+ is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
- Requires O(interval) to be provided.
- Mutually exclusive with O(args), O(ttl) and O(http).
type: str
version_added: '1.3.0'
http:
description:
- - Checks can be registered with an HTTP endpoint. This means that consul
- will check that the http endpoint returns a successful HTTP status.
+ - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+ HTTP status.
- Requires O(interval) to be provided.
- Mutually exclusive with O(args), O(ttl) and O(tcp).
type: str
timeout:
description:
- - A custom HTTP check timeout. The consul default is 10 seconds.
- Similar to the interval this is a number with a V(s) or V(m) suffix to
- signify the units of seconds or minutes, for example V(15s) or V(1m).
- If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
+ - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
+ or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
+ is used by default, for example V(10) is equivalent to V(10s).
type: str
service_id:
description:
- - The ID for the service, must be unique per node. If O(state=absent),
- defaults to the service name if supplied.
+ - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied.
type: str
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Register tcp check for service 'nginx'
community.general.consul_agent_check:
name: nginx_tcp_check
@@ -138,24 +130,24 @@ EXAMPLES = '''
state: absent
id: nginx_http_check
service_id: "{{ nginx_service.ID }}"
-'''
+"""
-RETURN = """
+RETURN = r"""
check:
- description: The check as returned by the consul HTTP API.
- returned: always
- type: dict
- sample:
- CheckID: nginx_check
- ServiceID: nginx
- Interval: 30s
- Type: http
- Notes: Nginx Check
+ description: The check as returned by the Consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ CheckID: nginx_check
+ ServiceID: nginx
+ Interval: 30s
+ Type: http
+ Notes: Nginx Check
operation:
- description: The operation performed.
- returned: changed
- type: str
- sample: update
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py
index a8ef098970..882e45dceb 100644
--- a/plugins/modules/consul_agent_service.py
+++ b/plugins/modules/consul_agent_service.py
@@ -9,17 +9,15 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: consul_agent_service
-short_description: Add, modify and delete services within a consul cluster
+short_description: Add, modify and delete services within a Consul cluster
version_added: 9.1.0
description:
- - Allows the addition, modification and deletion of services in a consul
- cluster via the agent.
- - There are currently no plans to create services and checks in one.
- This is because the Consul API does not provide checks for a service and
- the checks themselves do not match the module parameters.
- Therefore, only a service without checks can be created in this module.
+ - Allows the addition, modification and deletion of services in a Consul cluster using the agent.
+ - There are currently no plans to create services and checks in one. This is because the Consul API does not provide checks
+ for a service and the checks themselves do not match the module parameters. Therefore, only a service without checks can
+ be created in this module.
author:
- Michael Ilg (@Ilgmi)
extends_documentation_fragment:
@@ -33,7 +31,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
@@ -43,51 +41,47 @@ options:
type: str
name:
description:
- - Unique name for the service on a node, must be unique per node,
- required if registering a service.
+ - Unique name for the service on a node, must be unique per node, required if registering a service.
type: str
id:
description:
- - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not provided.
- If O(state=absent), defaults to the service name if supplied.
+ - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not
+ provided. If O(state=absent), defaults to the service name if supplied.
type: str
tags:
description:
- - Tags that will be attached to the service registration.
+ - Tags that are attached to the service registration.
type: list
elements: str
address:
description:
- - The address to advertise that the service will be listening on.
- This value will be passed as the C(address) parameter to Consul's
- C(/v1/agent/service/register) API method, so refer to the Consul API
- documentation for further details.
+ - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's
+ C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
type: str
meta:
description:
- - Optional meta data used for filtering.
- For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed.
- Not allowed characters are replaced with underscores.
+ - Optional meta data used for filtering. For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. Not
+ allowed characters are replaced with underscores.
type: dict
service_port:
description:
- - The port on which the service is listening. Can optionally be supplied for
- registration of a service, that is if O(name) or O(id) is set.
+ - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(name)
+ or O(id) is set.
type: int
enable_tag_override:
description:
- - Specifies to disable the anti-entropy feature for this service's tags.
- If EnableTagOverride is set to true then external agents can update this service in the catalog and modify the tags.
+ - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then
+ external agents can update this service in the catalog and modify the tags.
type: bool
- default: False
+ default: false
weights:
description:
- - Specifies weights for the service
+ - Specifies weights for the service.
type: dict
suboptions:
passing:
description:
- - Weights for passing.
+ - Weights for passing.
type: int
default: 1
warning:
@@ -96,10 +90,10 @@ options:
type: int
default: 1
default: {"passing": 1, "warning": 1}
-'''
+"""
-EXAMPLES = '''
-- name: Register nginx service with the local consul agent
+EXAMPLES = r"""
+- name: Register nginx service with the local Consul agent
community.general.consul_agent_service:
host: consul1.example.com
token: some_management_acl
@@ -162,33 +156,33 @@ EXAMPLES = '''
tags:
- prod
- worker
-'''
+"""
-RETURN = """
+RETURN = r"""
service:
- description: The service as returned by the consul HTTP API.
- returned: always
- type: dict
- sample:
- ID: nginx
- Service: nginx
- Address: localhost
- Port: 80
- Tags:
- - http
- Meta:
- - nginx_version: 1.23.3
- Datacenter: dc1
- Weights:
- Passing: 1
- Warning: 1
- ContentHash: 61a245cd985261ac
- EnableTagOverride: false
+ description: The service as returned by the Consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ ID: nginx
+ Service: nginx
+ Address: localhost
+ Port: 80
+ Tags:
+ - http
+ Meta:
+ - nginx_version: 1.23.3
+ Datacenter: dc1
+ Weights:
+ Passing: 1
+ Warning: 1
+ ContentHash: 61a245cd985261ac
+ EnableTagOverride: false
operation:
- description: The operation performed.
- returned: changed
- type: str
- sample: update
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py
index e28474c313..4658f906e3 100644
--- a/plugins/modules/consul_auth_method.py
+++ b/plugins/modules/consul_auth_method.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_auth_method
short_description: Manipulate Consul auth methods
version_added: 8.3.0
description:
- - Allows the addition, modification and deletion of auth methods in a consul
- cluster via the agent. For more details on using and configuring ACLs,
- see U(https://www.consul.io/docs/guides/acl.html).
+ - Allows the addition, modification and deletion of auth methods in a Consul cluster using the agent. For more details on
+ using and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
@@ -30,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
@@ -72,12 +71,12 @@ options:
config:
description:
- The raw configuration to use for the chosen auth method.
- - Contents will vary depending upon the type chosen.
+ - Contents vary depending upon the O(type) chosen.
- Required when the auth method is created.
type: dict
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create an auth method
community.general.consul_auth_method:
name: test
@@ -103,9 +102,9 @@ EXAMPLES = """
token: "{{ consul_management_token }}"
"""
-RETURN = """
+RETURN = r"""
auth_method:
- description: The auth method as returned by the consul HTTP API.
+ description: The auth method as returned by the Consul HTTP API.
returned: always
type: dict
sample:
@@ -126,10 +125,10 @@ auth_method:
Name: test
Type: jwt
operation:
- description: The operation performed.
- returned: changed
- type: str
- sample: update
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
"""
import re
diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py
index 6a2882cee2..0a4531fdf7 100644
--- a/plugins/modules/consul_binding_rule.py
+++ b/plugins/modules/consul_binding_rule.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_binding_rule
short_description: Manipulate Consul binding rules
version_added: 8.3.0
description:
- - Allows the addition, modification and deletion of binding rules in a consul
- cluster via the agent. For more details on using and configuring binding rules,
- see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules).
+ - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details
+ on using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
@@ -30,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
@@ -41,7 +40,8 @@ options:
name:
description:
- Specifies a name for the binding rule.
- - 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the description.'
+ - 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the
+ description.'
type: str
required: true
description:
@@ -74,7 +74,7 @@ options:
type: dict
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a binding rule
community.general.consul_binding_rule:
name: my_name
@@ -91,9 +91,9 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
binding_rule:
- description: The binding rule as returned by the consul HTTP API.
+ description: The binding rule as returned by the Consul HTTP API.
returned: always
type: dict
sample:
diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py
index 84169fc6b7..2987e71a86 100644
--- a/plugins/modules/consul_kv.py
+++ b/plugins/modules/consul_kv.py
@@ -10,15 +10,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: consul_kv
-short_description: Manipulate entries in the key/value store of a consul cluster
+short_description: Manipulate entries in the key/value store of a Consul cluster
description:
- - Allows the retrieval, addition, modification and deletion of key/value entries in a
- consul cluster via the agent. The entire contents of the record, including
- the indices, flags and session are returned as C(value).
- - If the O(key) represents a prefix then note that when a value is removed, the existing
- value if any is returned as part of the results.
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a Consul cluster using the agent. The
+ entire contents of the record, including the indices, flags and session are returned as C(value).
+ - If the O(key) represents a prefix then note that when a value is removed, the existing value if any is returned as part
+ of the results.
- See http://www.consul.io/docs/agent/http.html#kv for more details.
requirements:
- python-consul
@@ -29,92 +28,87 @@ author:
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- state:
- description:
- - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key
- contents will be set to the value supplied and C(changed) will be set to V(true) only if the value was
- different to the current contents. If the state is V(present) and O(value) is not set, the existing value
- associated to the key will be returned. The state V(absent) will remove the key/value pair,
- again C(changed) will be set to V(true) only if the key actually existed
- prior to the removal. An attempt can be made to obtain or free the
- lock associated with a key/value pair with the states V(acquire) or
- V(release) respectively. a valid session must be supplied to make the
- attempt changed will be true if the attempt is successful, false
- otherwise.
- type: str
- choices: [ absent, acquire, present, release ]
- default: present
- key:
- description:
- - The key at which the value should be stored.
- type: str
- required: true
- value:
- description:
- - The value should be associated with the given key, required if O(state)
- is V(present).
- type: str
- recurse:
- description:
- - If the key represents a prefix, each entry with the prefix can be
- retrieved by setting this to V(true).
- type: bool
- retrieve:
- description:
- - If the O(state) is V(present) and O(value) is set, perform a
- read after setting the value and return this value.
- default: true
- type: bool
- session:
- description:
- - The session that should be used to acquire or release a lock
- associated with a key/value pair.
- type: str
- token:
- description:
- - The token key identifying an ACL rule set that controls access to
- the key value pair
- type: str
- cas:
- description:
- - Used when acquiring a lock with a session. If the O(cas) is V(0), then
- Consul will only put the key if it does not already exist. If the
- O(cas) value is non-zero, then the key is only set if the index matches
- the ModifyIndex of that key.
- type: str
- flags:
- description:
- - Opaque positive integer value that can be passed when setting a value.
- type: str
- host:
- description:
- - Host of the consul agent.
- type: str
- default: localhost
- port:
- description:
- - The port on which the consul agent is running.
- type: int
- default: 8500
- scheme:
- description:
- - The protocol scheme on which the consul agent is running.
- type: str
- default: http
- validate_certs:
- description:
- - Whether to verify the tls certificate of the consul agent.
- type: bool
- default: true
-'''
+ state:
+ description:
+ - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key contents
+ is set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents.
+ If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state
+ V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed
+ prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with the
+ states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt C(changed) is V(true)
+ if the attempt is successful, V(false) otherwise.
+ type: str
+ choices: [absent, acquire, present, release]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: true
+ value:
+ description:
+ - The value should be associated with the given key, required if O(state) is V(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be retrieved by setting this to V(true).
+ type: bool
+ retrieve:
+ description:
+ - If the O(state) is V(present) and O(value) is set, perform a read after setting the value and return this value.
+ default: true
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock associated with a key/value pair.
+ type: str
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to the key value pair.
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already
+ exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the Consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the Consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the Consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the Consul agent.
+ type: bool
+ default: true
+ datacenter:
+ description:
+ - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host).
+ type: str
+ version_added: 10.0.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
- name: Retrieve a value from the key/value store
@@ -132,7 +126,7 @@ EXAMPLES = '''
key: somekey
state: absent
-- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+- name: Add a node to an arbitrary group using Consul inventory (see consul.ini)
community.general.consul_kv:
key: ansible/groups/dc1/somenode
value: top_secret
@@ -143,7 +137,7 @@ EXAMPLES = '''
value: 20160509
session: "{{ sessionid }}"
state: acquire
-'''
+"""
from ansible.module_utils.common.text.converters import to_text
@@ -291,7 +285,8 @@ def get_consul_api(module):
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
- token=module.params.get('token'))
+ token=module.params.get('token'),
+ dc=module.params.get('datacenter'))
def test_dependencies(module):
@@ -305,6 +300,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
cas=dict(type='str'),
+ datacenter=dict(type='str'),
flags=dict(type='str'),
key=dict(type='str', required=True, no_log=False),
host=dict(type='str', default='localhost'),
diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py
index 36139ac097..e009e44434 100644
--- a/plugins/modules/consul_policy.py
+++ b/plugins/modules/consul_policy.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_policy
short_description: Manipulate Consul policies
version_added: 7.2.0
description:
- - Allows the addition, modification and deletion of policies in a consul
- cluster via the agent. For more details on using and configuring ACLs,
- see U(https://www.consul.io/docs/guides/acl.html).
+ - Allows the addition, modification and deletion of policies in a Consul cluster using the agent. For more details on using
+ and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
author:
- Håkon Lerring (@Hakon)
extends_documentation_fragment:
@@ -32,7 +31,7 @@ attributes:
support: partial
version_added: 8.3.0
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
action_group:
version_added: 8.3.0
options:
@@ -49,8 +48,7 @@ options:
elements: str
name:
description:
- - The name that should be associated with the policy, this is opaque
- to Consul.
+ - The name that should be associated with the policy, this is opaque to Consul.
required: true
type: str
description:
@@ -63,19 +61,19 @@ options:
- Rule document that should be associated with the current policy.
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a policy with rules
community.general.consul_policy:
host: consul1.example.com
token: some_management_acl
name: foo-access
rules: |
- key "foo" {
- policy = "read"
- }
- key "private/foo" {
- policy = "deny"
- }
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
- name: Update the rules associated to a policy
community.general.consul_policy:
@@ -83,15 +81,15 @@ EXAMPLES = """
token: some_management_acl
name: foo-access
rules: |
- key "foo" {
- policy = "read"
- }
- key "private/foo" {
- policy = "deny"
- }
- event "bbq" {
- policy = "write"
- }
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
+ event "bbq" {
+ policy = "write"
+ }
- name: Remove a policy
community.general.consul_policy:
@@ -101,28 +99,28 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
policy:
- description: The policy as returned by the consul HTTP API.
- returned: always
- type: dict
- sample:
- CreateIndex: 632
- Description: Testing
- Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
- Name: foo-access
- Rules: |-
- key "foo" {
- policy = "read"
- }
- key "private/foo" {
- policy = "deny"
- }
+ description: The policy as returned by the Consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ CreateIndex: 632
+ Description: Testing
+ Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
+ Name: foo-access
+ Rules: |-
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
operation:
- description: The operation performed.
- returned: changed
- type: str
- sample: update
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
"""
from ansible.module_utils.basic import AnsibleModule
@@ -134,7 +132,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import (
_ARGUMENT_SPEC = {
"name": dict(required=True),
- "description": dict(required=False, type="str"),
+ "description": dict(type="str"),
"rules": dict(type="str"),
"valid_datacenters": dict(type="list", elements="str"),
"state": dict(default="present", choices=["present", "absent"]),
diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py
index d6c4e4dd92..4efbef699a 100644
--- a/plugins/modules/consul_role.py
+++ b/plugins/modules/consul_role.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_role
short_description: Manipulate Consul roles
version_added: 7.5.0
description:
- - Allows the addition, modification and deletion of roles in a consul
- cluster via the agent. For more details on using and configuring ACLs,
- see U(https://www.consul.io/docs/guides/acl.html).
+ - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using
+ and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
author:
- Håkon Lerring (@Hakon)
extends_documentation_fragment:
@@ -30,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
version_added: 8.3.0
action_group:
version_added: 8.3.0
@@ -42,22 +41,22 @@ options:
type: str
state:
description:
- - whether the role should be present or absent.
+ - Whether the role should be present or absent.
choices: ['present', 'absent']
default: present
type: str
description:
description:
- Description of the role.
- - If not specified, the assigned description will not be changed.
+ - If not specified, the assigned description is not changed.
type: str
policies:
type: list
elements: dict
description:
- List of policies to attach to the role. Each policy is a dict.
- - If the parameter is left blank, any policies currently assigned will not be changed.
- - Any empty array (V([])) will clear any policies previously set.
+ - If the parameter is left blank, any policies currently assigned are not changed.
+ - Any empty array (V([])) clears any policies previously set.
suboptions:
name:
description:
@@ -91,26 +90,26 @@ options:
elements: dict
description:
- List of service identities to attach to the role.
- - If not specified, any service identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any service identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
service_name:
description:
- The name of the node.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- - May only contain lowercase alphanumeric characters as well as - and _.
- - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name)
- in community.general 8.3.0. The old name can still be used.
+ - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+ - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name) in community.general
+ 8.3.0. The old name can still be used.
type: str
required: true
aliases:
- name
datacenters:
description:
- - The datacenters the policies will be effective.
- - This will result in effective policy only being valid in this datacenter.
- - If an empty array (V([])) is specified, the policies will valid in all datacenters.
- - including those which do not yet exist but may in the future.
+ - The datacenters where the policies are effective.
+ - This results in effective policy only being valid in this datacenter.
+ - If an empty array (V([])) is specified, the policies are valid in all datacenters.
+ - Including those which do not yet exist but may in the future.
type: list
elements: str
node_identities:
@@ -118,16 +117,16 @@ options:
elements: dict
description:
- List of node identities to attach to the role.
- - If not specified, any node identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any node identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
node_name:
description:
- The name of the node.
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
- - May only contain lowercase alphanumeric characters as well as - and _.
- - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name)
- in community.general 8.3.0. The old name can still be used.
+ - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+ - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name) in community.general
+ 8.3.0. The old name can still be used.
type: str
required: true
aliases:
@@ -135,12 +134,12 @@ options:
datacenter:
description:
- The nodes datacenter.
- - This will result in effective policy only being valid in this datacenter.
+ - This results in effective policy only being valid in this datacenter.
type: str
required: true
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a role with 2 policies
community.general.consul_role:
host: consul1.example.com
@@ -177,28 +176,31 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
role:
- description: The role object.
- returned: success
- type: dict
- sample:
+ description: The role object.
+ returned: success
+ type: dict
+ sample:
+ {
+ "CreateIndex": 39,
+ "Description": "",
+ "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
+ "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
+ "ModifyIndex": 39,
+ "Name": "foo-role",
+ "Policies": [
{
- "CreateIndex": 39,
- "Description": "",
- "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
- "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
- "ModifyIndex": 39,
- "Name": "foo-role",
- "Policies": [
- {"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
- ]
+ "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774",
+ "Name": "foo-access"
}
+ ]
+ }
operation:
- description: The operation performed on the role.
- returned: changed
- type: str
- sample: update
+ description: The operation performed on the role.
+ returned: changed
+ type: str
+ sample: update
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py
index 87a5f19143..637b09aff2 100644
--- a/plugins/modules/consul_session.py
+++ b/plugins/modules/consul_session.py
@@ -8,14 +8,13 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: consul_session
-short_description: Manipulate consul sessions
+short_description: Manipulate Consul sessions
description:
- - Allows the addition, modification and deletion of sessions in a consul
- cluster. These sessions can then be used in conjunction with key value pairs
- to implement distributed locks. In depth documentation for working with
- sessions can be found at http://www.consul.io/docs/internals/sessions.html
+ - Allows the addition, modification and deletion of sessions in a Consul cluster. These sessions can then be used in conjunction
+ with key value pairs to implement distributed locks. In depth documentation for working with sessions can be found at
+ U(http://www.consul.io/docs/internals/sessions.html).
author:
- Steve Gargan (@sgargan)
- Håkon Lerring (@Hakon)
@@ -25,78 +24,69 @@ extends_documentation_fragment:
- community.general.consul.token
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 8.3.0
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ action_group:
+ version_added: 8.3.0
options:
- id:
- description:
- - ID of the session, required when O(state) is either V(info) or
- V(remove).
- type: str
- state:
- description:
- - Whether the session should be present i.e. created if it doesn't
- exist, or absent, removed if present. If created, the O(id) for the
- session is returned in the output. If V(absent), O(id) is
- required to remove the session. Info for a single session, all the
- sessions for a node or all available sessions can be retrieved by
- specifying V(info), V(node) or V(list) for the O(state); for V(node)
- or V(info), the node O(name) or session O(id) is required as parameter.
- choices: [ absent, info, list, node, present ]
- type: str
- default: present
- name:
- description:
- - The name that should be associated with the session. Required when
- O(state=node) is used.
- type: str
- delay:
- description:
- - The optional lock delay that can be attached to the session when it
- is created. Locks for invalidated sessions ar blocked from being
- acquired until this delay has expired. Durations are in seconds.
- type: int
- default: 15
- node:
- description:
- - The name of the node that with which the session will be associated.
- by default this is the name of the agent.
- type: str
- datacenter:
- description:
- - The name of the datacenter in which the session exists or should be
- created.
- type: str
- checks:
- description:
- - Checks that will be used to verify the session health. If
- all the checks fail, the session will be invalidated and any locks
- associated with the session will be release and can be acquired once
- the associated lock delay has expired.
- type: list
- elements: str
- behavior:
- description:
- - The optional behavior that can be attached to the session when it
- is created. This controls the behavior when a session is invalidated.
- choices: [ delete, release ]
- type: str
- default: release
- ttl:
- description:
- - Specifies the duration of a session in seconds (between 10 and 86400).
- type: int
- version_added: 5.4.0
- token:
- version_added: 5.6.0
-'''
+ id:
+ description:
+ - ID of the session, required when O(state) is either V(info) or V(absent).
+ type: str
+ state:
+ description:
+ - Whether the session should be present, in other words it should be created if it does not exist, or absent, removed
+ if present. If created, the O(id) for the session is returned in the output. If V(absent), O(id) is required to remove
+ the session. Info for a single session, all the sessions for a node or all available sessions can be retrieved by
+ specifying V(info), V(node) or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id)
+ is required as parameter.
+ choices: [absent, info, list, node, present]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when O(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions are
+ blocked from being acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+ - The name of the node with which the session is associated. By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be created.
+ type: str
+ checks:
+ description:
+ - Checks that are used to verify the session health. If all the checks fail, the session is invalidated and any locks
+ associated with the session are released and can be acquired once the associated lock delay has expired.
+ type: list
+ elements: str
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it is created. This controls the behavior when a session
+ is invalidated.
+ choices: [delete, release]
+ type: str
+ default: release
+ ttl:
+ description:
+ - Specifies the duration of a session in seconds (between 10 and 86400).
+ type: int
+ version_added: 5.4.0
+ token:
+ version_added: 5.6.0
+"""
-EXAMPLES = '''
-- name: Register basic session with consul
+EXAMPLES = r"""
+- name: Register basic session with Consul
community.general.consul_session:
name: session1
@@ -123,8 +113,8 @@ EXAMPLES = '''
- name: Register session with a ttl
community.general.consul_session:
name: session-with-ttl
- ttl: 600 # sec
-'''
+ ttl: 600 # sec
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.consul import (
diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py
index 6cb8b38b84..1e5aa19f4c 100644
--- a/plugins/modules/consul_token.py
+++ b/plugins/modules/consul_token.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: consul_token
short_description: Manipulate Consul tokens
version_added: 8.3.0
description:
- - Allows the addition, modification and deletion of tokens in a consul
- cluster via the agent. For more details on using and configuring ACLs,
- see U(https://www.consul.io/docs/guides/acl.html).
+ - Allows the addition, modification and deletion of tokens in a Consul cluster using the agent. For more details on using
+ and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
author:
- Florian Apolloner (@apollo13)
extends_documentation_fragment:
@@ -30,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
action_group:
version_added: 8.3.0
options:
@@ -42,13 +41,11 @@ options:
type: str
accessor_id:
description:
- - Specifies a UUID to use as the token's Accessor ID.
- If not specified a UUID will be generated for this field.
+ - Specifies a UUID to use as the token's Accessor ID. If not specified, a UUID is generated for this field.
type: str
secret_id:
description:
- - Specifies a UUID to use as the token's Secret ID.
- If not specified a UUID will be generated for this field.
+ - Specifies a UUID to use as the token's Secret ID. If not specified, a UUID is generated for this field.
type: str
description:
description:
@@ -59,8 +56,8 @@ options:
elements: dict
description:
- List of policies to attach to the token. Each policy is a dict.
- - If the parameter is left blank, any policies currently assigned will not be changed.
- - Any empty array (V([])) will clear any policies previously set.
+ - If the parameter is left blank, any policies currently assigned are not changed.
+ - Any empty array (V([])) clears any policies previously set.
suboptions:
name:
description:
@@ -77,8 +74,8 @@ options:
elements: dict
description:
- List of roles to attach to the token. Each role is a dict.
- - If the parameter is left blank, any roles currently assigned will not be changed.
- - Any empty array (V([])) will clear any roles previously set.
+ - If the parameter is left blank, any roles currently assigned are not changed.
+ - Any empty array (V([])) clears any roles previously set.
suboptions:
name:
description:
@@ -111,8 +108,8 @@ options:
elements: dict
description:
- List of service identities to attach to the token.
- - If not specified, any service identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any service identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
service_name:
description:
@@ -123,9 +120,9 @@ options:
required: true
datacenters:
description:
- - The datacenters the token will be effective.
- - If an empty array (V([])) is specified, the token will valid in all datacenters.
- - including those which do not yet exist but may in the future.
+ - The datacenters where the token is effective.
+ - If an empty array (V([])) is specified, the token is valid in all datacenters.
+ - Including those which do not yet exist but may in the future.
type: list
elements: str
node_identities:
@@ -133,8 +130,8 @@ options:
elements: dict
description:
- List of node identities to attach to the token.
- - If not specified, any node identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any node identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
node_name:
description:
@@ -146,23 +143,21 @@ options:
datacenter:
description:
- The nodes datacenter.
- - This will result in effective token only being valid in this datacenter.
+ - This results in effective token only being valid in this datacenter.
type: str
required: true
local:
description:
- - If true, indicates that the token should not be replicated globally
- and instead be local to the current datacenter.
+ - If true, indicates that the token should not be replicated globally and instead be local to the current datacenter.
type: bool
expiration_ttl:
description:
- - This is a convenience field and if set will initialize the C(expiration_time).
- Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes,
- respectively). Ingored when the token is updated!
+ - This is a convenience field and if set it initializes the C(expiration_time). Can be specified in the form of V(60s)
+ or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ignored when the token is updated!
type: str
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create / Update a token by accessor_id
community.general.consul_token:
state: present
@@ -186,26 +181,26 @@ EXAMPLES = """
token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
"""
-RETURN = """
+RETURN = r"""
token:
- description: The token as returned by the consul HTTP API.
- returned: always
- type: dict
- sample:
- AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21
- CreateIndex: 632
- CreateTime: "2024-01-14T21:53:01.402749174+01:00"
- Description: Testing
- Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
- Local: false
- ModifyIndex: 633
- SecretID: bd380fba-da17-7cee-8576-8d6427c6c930
- ServiceIdentities: [{"ServiceName": "test"}]
+ description: The token as returned by the Consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21
+ CreateIndex: 632
+ CreateTime: "2024-01-14T21:53:01.402749174+01:00"
+ Description: Testing
+ Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
+ Local: false
+ ModifyIndex: 633
+ SecretID: bd380fba-da17-7cee-8576-8d6427c6c930
+ ServiceIdentities: [{"ServiceName": "test"}]
operation:
- description: The operation performed.
- returned: changed
- type: str
- sample: update
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py
index 80f6f06589..940fc0eedd 100644
--- a/plugins/modules/copr.py
+++ b/plugins/modules/copr.py
@@ -9,61 +9,60 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
----
module: copr
short_description: Manage one of the Copr repositories
version_added: 2.0.0
description: This module can enable, disable or remove the specified repository.
author: Silvie Chlupova (@schlupov)
requirements:
- - dnf
- - dnf-plugins-core
+ - dnf
+ - dnf-plugins-core
notes:
- - Supports C(check_mode).
+ - Supports C(check_mode).
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- host:
- description: The Copr host to work with.
- default: copr.fedorainfracloud.org
- type: str
- protocol:
- description: This indicate which protocol to use with the host.
- default: https
- type: str
- name:
- description: Copr directory name, for example C(@copr/copr-dev).
- required: true
- type: str
- state:
- description:
- - Whether to set this project as V(enabled), V(disabled), or V(absent).
- default: enabled
- type: str
- choices: [absent, enabled, disabled]
- chroot:
- description:
- - The name of the chroot that you want to enable/disable/remove in the project,
- for example V(epel-7-x86_64). Default chroot is determined by the operating system,
- version of the operating system, and architecture on which the module is run.
- type: str
- includepkgs:
- description: List of packages to include.
- required: false
- type: list
- elements: str
- version_added: 9.4.0
- excludepkgs:
- description: List of packages to exclude.
- required: false
- type: list
- elements: str
- version_added: 9.4.0
+ host:
+ description: The Copr host to work with.
+ default: copr.fedorainfracloud.org
+ type: str
+ protocol:
+ description: This indicates which protocol to use with the host.
+ default: https
+ type: str
+ name:
+ description: Copr directory name, for example C(@copr/copr-dev).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to set this project as V(enabled), V(disabled), or V(absent).
+ default: enabled
+ type: str
+ choices: [absent, enabled, disabled]
+ chroot:
+ description:
+ - The name of the chroot that you want to enable/disable/remove in the project, for example V(epel-7-x86_64). Default
+ chroot is determined by the operating system, version of the operating system, and architecture on which the module
+ is run.
+ type: str
+ includepkgs:
+ description: List of packages to include.
+ required: false
+ type: list
+ elements: str
+ version_added: 9.4.0
+ excludepkgs:
+ description: List of packages to exclude.
+ required: false
+ type: list
+ elements: str
+ version_added: 9.4.0
"""
EXAMPLES = r"""
@@ -495,8 +494,8 @@ def run_module():
name=dict(type="str", required=True),
state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
chroot=dict(type="str"),
- includepkgs=dict(type='list', elements="str", required=False),
- excludepkgs=dict(type='list', elements="str", required=False),
+ includepkgs=dict(type='list', elements="str"),
+ excludepkgs=dict(type='list', elements="str"),
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
params = module.params
diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py
index 25489170dd..482183c0e0 100644
--- a/plugins/modules/cpanm.py
+++ b/plugins/modules/cpanm.py
@@ -10,14 +10,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: cpanm
short_description: Manages Perl library dependencies
description:
-- Manage Perl library dependencies using cpanminus.
+ - Manage Perl library dependencies using cpanminus.
extends_documentation_fragment:
-- community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -27,82 +26,96 @@ options:
name:
type: str
description:
- - The Perl library to install. Valid values change according to the O(mode), see notes for more details.
- - Note that for installing from a local path the parameter O(from_path) should be used.
+ - The Perl library to install. Valid values change according to the O(mode), see notes for more details.
+ - Note that for installing from a local path the parameter O(from_path) should be used.
aliases: [pkg]
from_path:
type: path
description:
- - The local directory or C(tar.gz) file to install from.
+ - The local directory or C(tar.gz) file to install from.
notest:
description:
- - Do not run unit tests.
+ - Do not run unit tests.
type: bool
default: false
locallib:
description:
- - Specify the install base to install modules.
+ - Specify the install base to install modules.
type: path
mirror:
description:
- - Specifies the base URL for the CPAN mirror to use.
+ - Specifies the base URL for the CPAN mirror to use.
type: str
mirror_only:
description:
- - Use the mirror's index file instead of the CPAN Meta DB.
+ - Use the mirror's index file instead of the CPAN Meta DB.
type: bool
default: false
installdeps:
description:
- - Only install dependencies.
+ - Only install dependencies.
type: bool
default: false
+ install_recommendations:
+ description:
+ - If V(true), installs dependencies declared as recommends per META spec.
+ - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier
+ in E(PERL_CPANM_OPT).
+ - If parameter is not set, C(cpanm) uses its existing defaults.
+ - When these dependencies fail to install, cpanm continues the installation, since they are just recommendations.
+ type: bool
+ version_added: 10.3.0
+ install_suggestions:
+ description:
+ - If V(true), installs dependencies declared as suggests per META spec.
+ - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier
+ in E(PERL_CPANM_OPT).
+ - If parameter is not set, C(cpanm) uses its existing defaults.
+ - When these dependencies fail to install, cpanm continues the installation, since they are just suggestions.
+ type: bool
+ version_added: 10.3.0
version:
description:
- - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
+ - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
type: str
executable:
description:
- - Override the path to the cpanm executable.
+ - Override the path to the cpanm executable.
type: path
mode:
description:
- - Controls the module behavior. See notes below for more details.
- - The default changed from V(compatibility) to V(new) in community.general 9.0.0.
+ - Controls the module behavior. See notes below for more details.
+ - The default changed from V(compatibility) to V(new) in community.general 9.0.0.
type: str
choices: [compatibility, new]
default: new
version_added: 3.0.0
name_check:
description:
- - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified).
+ - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when
+ specified).
type: str
version_added: 3.0.0
notes:
-- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
-- "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)."
-- >
- O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility.
- This was the default mode before community.general 9.0.0.
- O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version)
- when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL.
- C(cpanm) version specifiers do not work in this mode.
-- >
- O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file,
- a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized.
- This is the default mode from community.general 9.0.0 onwards.
-
+ - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+ - 'This module now comes with a choice of execution O(mode): V(compatibility) or V(new).'
+ - 'O(mode=compatibility): When using V(compatibility) mode, the module keeps backward compatibility. This was the default
+ mode before community.general 9.0.0. O(name) must be either a module name or a distribution file. If the perl module given
+ by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it is installed using
+ the C(cpanm) executable. O(name) cannot be an URL, or a git URL. C(cpanm) version specifiers do not work in this mode.'
+ - 'O(mode=new): When using V(new) mode, the module behaves differently. The O(name) parameter may refer to a module name,
+ a distribution file, a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers
+ are recognized. This is the default mode from community.general 9.0.0 onwards.'
seealso:
-- name: C(cpanm) command manual page
- description: Manual page for the command.
- link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm
+ - name: C(cpanm) command manual page
+ description: Manual page for the command.
+ link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm
author:
-- "Franck Cuny (@fcuny)"
-- "Alexei Znamensky (@russoz)"
+ - "Franck Cuny (@fcuny)"
+ - "Alexei Znamensky (@russoz)"
"""
-EXAMPLES = """
----
+EXAMPLES = r"""
- name: Install Dancer perl package
community.general.cpanm:
name: Dancer
@@ -142,7 +155,18 @@ EXAMPLES = """
version: '1.0'
"""
+RETURN = r"""
+cpanm_version:
+ description: Version of CPANMinus.
+ type: str
+ returned: always
+ sample: "1.7047"
+ version_added: 10.0.0
+"""
+
+
import os
+import re
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
@@ -160,6 +184,8 @@ class CPANMinus(ModuleHelper):
mirror=dict(type='str'),
mirror_only=dict(type='bool', default=False),
installdeps=dict(type='bool', default=False),
+ install_recommendations=dict(type='bool'),
+ install_suggestions=dict(type='bool'),
executable=dict(type='path'),
mode=dict(type='str', default='new', choices=['compatibility', 'new']),
name_check=dict(type='str')
@@ -174,9 +200,11 @@ class CPANMinus(ModuleHelper):
mirror=cmd_runner_fmt.as_opt_val('--mirror'),
mirror_only=cmd_runner_fmt.as_bool("--mirror-only"),
installdeps=cmd_runner_fmt.as_bool("--installdeps"),
+ install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True),
+ install_suggestions=cmd_runner_fmt.as_bool("--with-suggests", "--without-suggests", ignore_none=True),
pkg_spec=cmd_runner_fmt.as_list(),
+ cpanm_version=cmd_runner_fmt.as_fixed("--version"),
)
- use_old_vardict = False
def __init_module__(self):
v = self.vars
@@ -191,6 +219,14 @@ class CPANMinus(ModuleHelper):
self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
self.vars.binary = self.runner.binary
+ with self.runner("cpanm_version") as ctx:
+ rc, out, err = ctx.run()
+ line = out.split('\n')[0]
+ match = re.search(r"version\s+([\d\.]+)\s+", line)
+ if not match:
+ self.do_raise("Failed to determine version number. First line of output: {0}".format(line))
+ self.vars.cpanm_version = match.group(1)
+
def _is_package_installed(self, name, locallib, version):
def process(rc, out, err):
return rc == 0
@@ -238,7 +274,16 @@ class CPANMinus(ModuleHelper):
return
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
- with self.runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
+ with self.runner([
+ 'notest',
+ 'locallib',
+ 'mirror',
+ 'mirror_only',
+ 'installdeps',
+ 'install_recommendations',
+ 'install_suggestions',
+ 'pkg_spec'
+ ], output_process=process) as ctx:
self.changed = ctx.run(pkg_spec=pkg_spec)
diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py
index 66fa175498..5f7d02bfc3 100644
--- a/plugins/modules/cronvar.py
+++ b/plugins/modules/cronvar.py
@@ -17,8 +17,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: cronvar
short_description: Manage variables in crontabs
description:
@@ -44,19 +43,18 @@ options:
type: str
insertafter:
description:
- - If specified, the variable will be inserted after the variable specified.
+ - If specified, the variable is inserted after the variable specified.
- Used with O(state=present).
type: str
insertbefore:
description:
- - Used with O(state=present). If specified, the variable will be inserted
- just before the variable specified.
+ - Used with O(state=present). If specified, the variable is inserted just before the variable specified.
type: str
state:
description:
- Whether to ensure that the variable is present or absent.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
user:
description:
@@ -71,18 +69,17 @@ options:
type: str
backup:
description:
- - If set, create a backup of the crontab before it is modified.
- The location of the backup is returned in the C(backup) variable by this module.
- # TODO: C() above should be RV(), but return values have not been documented!
+ - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup)
+ variable by this module.
type: bool
default: false
requirements:
- cron
author:
-- Doug Luce (@dougluce)
-'''
+ - Doug Luce (@dougluce)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
community.general.cronvar:
name: EMAIL
@@ -99,7 +96,7 @@ EXAMPLES = r'''
value: /var/log/yum-autoupdate.log
user: root
cron_file: ansible_yum-autoupdate
-'''
+"""
import os
import platform
@@ -138,6 +135,9 @@ class CronVar(object):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ parent_dir = os.path.dirname(self.cron_file)
+ if parent_dir and not os.path.isdir(parent_dir):
+ module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file))
else:
self.cron_file = None
@@ -149,9 +149,8 @@ class CronVar(object):
if self.cron_file:
# read the cronfile
try:
- f = open(self.cron_file, 'r')
- self.lines = f.read().splitlines()
- f.close()
+ with open(self.cron_file, 'r') as f:
+ self.lines = f.read().splitlines()
except IOError:
# cron file does not exist
return
@@ -397,6 +396,8 @@ def main():
old_value = cronvar.find_variable(name)
if ensure_present:
+ if value == "" and old_value != "":
+ value = '""'
if old_value is None:
cronvar.add_variable(name, value, insertbefore, insertafter)
changed = True
diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py
index ea93a97c66..5749d75cec 100644
--- a/plugins/modules/crypttab.py
+++ b/plugins/modules/crypttab.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: crypttab
short_description: Encrypted Linux block devices
description:
@@ -24,31 +23,27 @@ attributes:
options:
name:
description:
- - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
- optionally prefixed with V(/dev/mapper/), as it appears in the filesystem. V(/dev/mapper/)
- will be stripped from O(name).
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/),
+ as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name).
type: str
required: true
state:
description:
- - Use V(present) to add a line to C(/etc/crypttab) or update its definition
- if already present.
+ - Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present.
- Use V(absent) to remove a line with matching O(name).
- - Use V(opts_present) to add options to those already present; options with
- different values will be updated.
+ - Use V(opts_present) to add options to those already present; options with different values are updated.
- Use V(opts_absent) to remove options from the existing set.
type: str
required: true
- choices: [ absent, opts_absent, opts_present, present ]
+ choices: [absent, opts_absent, opts_present, present]
backing_device:
description:
- - Path to the underlying block device or file, or the UUID of a block-device
- prefixed with V(UUID=).
+ - Path to the underlying block device or file, or the UUID of a block-device prefixed with V(UUID=).
type: str
password:
description:
- - Encryption password, the path to a file containing the password, or
- V(-) or unset if the password should be entered at boot.
+ - Encryption password, the path to a file containing the password, or V(-) or unset if the password should be entered
+ at boot.
type: path
opts:
description:
@@ -61,10 +56,10 @@ options:
type: path
default: /etc/crypttab
author:
-- Steve (@groks)
-'''
+ - Steve (@groks)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Set the options explicitly a device which must already exist
community.general.crypttab:
name: luks-home
@@ -78,7 +73,15 @@ EXAMPLES = r'''
opts: discard
loop: '{{ ansible_mounts }}'
when: "'/dev/mapper/luks-' in item.device"
-'''
+
+- name: Add entry to /etc/crypttab for luks-home with password file
+ community.general.crypttab:
+ name: luks-home
+ backing_device: UUID=123e4567-e89b-12d3-a456-426614174000
+ password: /root/keys/luks-home.key
+ opts: discard,cipher=aes-cbc-essiv:sha256
+ state: present
+"""
import os
import traceback
@@ -121,7 +124,7 @@ def main():
('backing_device', backing_device),
('password', password),
('opts', opts)):
- if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ if arg is not None and (' ' in arg or '\t' in arg or arg == ''):
module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
**module.params)
@@ -159,11 +162,8 @@ def main():
changed, reason = existing_line.opts.remove(opts)
if changed and not module.check_mode:
- try:
- f = open(path, 'wb')
+ with open(path, 'wb') as f:
f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
- finally:
- f.close()
module.exit_json(changed=changed, msg=reason, **module.params)
@@ -178,12 +178,9 @@ class Crypttab(object):
os.makedirs(os.path.dirname(path))
open(path, 'a').close()
- try:
- f = open(path, 'r')
+ with open(path, 'r') as f:
for line in f.readlines():
self._lines.append(Line(line))
- finally:
- f.close()
def add(self, line):
self._lines.append(line)
diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py
index a3a6a660f0..9e48410014 100644
--- a/plugins/modules/datadog_downtime.py
+++ b/plugins/modules/datadog_downtime.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: datadog_downtime
short_description: Manages Datadog downtimes
version_added: 2.0.0
@@ -25,132 +24,133 @@ requirements:
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- api_key:
- description:
- - Your Datadog API key.
- required: true
- type: str
- api_host:
- description:
- - The URL to the Datadog API.
- - This value can also be set with the E(DATADOG_HOST) environment variable.
- required: false
- default: https://api.datadoghq.com
- type: str
- app_key:
- description:
- - Your Datadog app key.
- required: true
- type: str
- state:
- description:
- - The designated state of the downtime.
- required: false
- choices: ["present", "absent"]
- default: present
- type: str
- id:
- description:
- - The identifier of the downtime.
- - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the O(state).
- - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
- type: int
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API.
+ - This value can also be set with the E(DATADOG_HOST) environment variable.
+ required: false
+ default: https://api.datadoghq.com
+ type: str
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the downtime.
+ required: false
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ id:
+ description:
+ - The identifier of the downtime.
+ - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the O(state).
+ - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
+ type: int
+ monitor_tags:
+ description:
+ - A list of monitor tags to which the downtime applies.
+ - The resulting downtime applies to monitors that match ALL provided monitor tags.
+ type: list
+ elements: str
+ scope:
+ description:
+ - A list of scopes to which the downtime applies.
+ - The resulting downtime applies to sources that matches ALL provided scopes.
+ type: list
+ elements: str
+ monitor_id:
+ description:
+ - The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
+ type: int
+ downtime_message:
+ description:
+ - A message to include with notifications for this downtime.
+ - Email notifications can be sent to specific users by using the same "@username" notation as events.
+ type: str
+ start:
+ type: int
+ description:
+ - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
+ end:
+ type: int
+ description:
+ - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
+ timezone:
+ description:
+ - The timezone for the downtime.
+ type: str
+ rrule:
+ description:
+ - The C(RRULE) standard for defining recurring events.
+ - For example, to have a recurring event on the first day of each month, select a type of rrule and set the C(FREQ)
+ to C(MONTHLY) and C(BYMONTHDAY) to C(1).
+ - Most common rrule options from the iCalendar Spec are supported.
+ - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)).
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Create a downtime
+ register: downtime_var
+ community.general.datadog_downtime:
+ state: present
monitor_tags:
- description:
- - A list of monitor tags to which the downtime applies.
- - The resulting downtime applies to monitors that match ALL provided monitor tags.
- type: list
- elements: str
- scope:
- description:
- - A list of scopes to which the downtime applies.
- - The resulting downtime applies to sources that matches ALL provided scopes.
- type: list
- elements: str
- monitor_id:
- description:
- - The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
- type: int
- downtime_message:
- description:
- - A message to include with notifications for this downtime.
- - Email notifications can be sent to specific users by using the same "@username" notation as events.
- type: str
- start:
- type: int
- description:
- - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
- end:
- type: int
- description:
- - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
- timezone:
- description:
- - The timezone for the downtime.
- type: str
- rrule:
- description:
- - The C(RRULE) standard for defining recurring events.
- - For example, to have a recurring event on the first day of each month,
- select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1).
- - Most common rrule options from the iCalendar Spec are supported.
- - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)).
- type: str
+ - "foo:bar"
+ downtime_message: "Downtime for foo:bar"
+ scope: "test"
+ api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created
+ id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
+- name: Save downtime id to file for later updates and idempotence
+ delegate_to: localhost
+ copy:
+ content: "{{ downtime.downtime.id }}"
+ dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
"""
-EXAMPLES = """
- - name: Create a downtime
- register: downtime_var
- community.general.datadog_downtime:
- state: present
- monitor_tags:
- - "foo:bar"
- downtime_message: "Downtime for foo:bar"
- scope: "test"
- api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created
- id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
- - name: Save downtime id to file for later updates and idempotence
- delegate_to: localhost
- copy:
- content: "{{ downtime.downtime.id }}"
- dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
-"""
-
-RETURN = """
+RETURN = r"""
# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
downtime:
- description: The downtime returned by the API.
- type: dict
- returned: always
- sample: {
- "active": true,
- "canceled": null,
- "creator_id": 1445416,
- "disabled": false,
- "downtime_type": 2,
- "end": null,
- "id": 1055751000,
- "message": "Downtime for foo:bar",
- "monitor_id": null,
- "monitor_tags": [
- "foo:bar"
- ],
- "parent_id": null,
- "recurrence": null,
- "scope": [
- "test"
- ],
- "start": 1607015009,
- "timezone": "UTC",
- "updater_id": null
+ description: The downtime returned by the API.
+ type: dict
+ returned: always
+ sample:
+ {
+ "active": true,
+ "canceled": null,
+ "creator_id": 1445416,
+ "disabled": false,
+ "downtime_type": 2,
+ "end": null,
+ "id": 1055751000,
+ "message": "Downtime for foo:bar",
+ "monitor_id": null,
+ "monitor_tags": [
+ "foo:bar"
+ ],
+ "parent_id": null,
+ "recurrence": null,
+ "scope": [
+ "test"
+ ],
+ "start": 1607015009,
+ "timezone": "UTC",
+ "updater_id": null
}
"""
@@ -175,18 +175,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
- api_host=dict(required=False, default="https://api.datadoghq.com"),
+ api_host=dict(default="https://api.datadoghq.com"),
app_key=dict(required=True, no_log=True),
- state=dict(required=False, choices=["present", "absent"], default="present"),
- monitor_tags=dict(required=False, type="list", elements="str"),
- scope=dict(required=False, type="list", elements="str"),
- monitor_id=dict(required=False, type="int"),
- downtime_message=dict(required=False, no_log=True),
- start=dict(required=False, type="int"),
- end=dict(required=False, type="int"),
- timezone=dict(required=False, type="str"),
- rrule=dict(required=False, type="str"),
- id=dict(required=False, type="int"),
+ state=dict(choices=["present", "absent"], default="present"),
+ monitor_tags=dict(type="list", elements="str"),
+ scope=dict(type="list", elements="str"),
+ monitor_id=dict(type="int"),
+ downtime_message=dict(no_log=True),
+ start=dict(type="int"),
+ end=dict(type="int"),
+ timezone=dict(type="str"),
+ rrule=dict(type="str"),
+ id=dict(type="int"),
)
)
diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py
index 6008b565b3..fd75ea81de 100644
--- a/plugins/modules/datadog_event.py
+++ b/plugins/modules/datadog_event.py
@@ -14,81 +14,88 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: datadog_event
-short_description: Posts events to Datadog service
+short_description: Posts events to Datadog service
description:
- - "Allows to post events to Datadog (www.datadoghq.com) service."
- - "Uses http://docs.datadoghq.com/api/#events API."
+ - Allows to post events to Datadog (www.datadoghq.com) service.
+ - Uses http://docs.datadoghq.com/api/#events API.
author:
- "Artūras 'arturaz' Šlajus (@arturaz)"
- "Naoya Nakazawa (@n0ts)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- api_key:
- type: str
- description: ["Your DataDog API key."]
- required: true
- app_key:
- type: str
- description: ["Your DataDog app key."]
- required: true
- title:
- type: str
- description: ["The event title."]
- required: true
- text:
- type: str
- description: ["The body of the event."]
- required: true
- date_happened:
- type: int
- description:
- - POSIX timestamp of the event.
- - Default value is now.
- priority:
- type: str
- description: ["The priority of the event."]
- default: normal
- choices: [normal, low]
- host:
- type: str
- description:
- - Host name to associate with the event.
- - If not specified, it defaults to the remote system's hostname.
- api_host:
- type: str
- description:
- - DataDog API endpoint URL.
- version_added: '3.3.0'
- tags:
- type: list
- elements: str
- description: ["Comma separated list of tags to apply to the event."]
- alert_type:
- type: str
- description: ["Type of alert."]
- default: info
- choices: ['error', 'warning', 'info', 'success']
- aggregation_key:
- type: str
- description: ["An arbitrary string to use for aggregation."]
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- type: bool
- default: true
-'''
+ api_key:
+ type: str
+ description:
+ - Your DataDog API key.
+ required: true
+ app_key:
+ type: str
+ description:
+ - Your DataDog app key.
+ required: true
+ title:
+ type: str
+ description:
+ - The event title.
+ required: true
+ text:
+ type: str
+ description:
+ - The body of the event.
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ priority:
+ type: str
+ description:
+ - The priority of the event.
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ api_host:
+ type: str
+ description:
+ - DataDog API endpoint URL.
+ version_added: '3.3.0'
+ tags:
+ type: list
+ elements: str
+ description:
+ - Comma separated list of tags to apply to the event.
+ alert_type:
+ type: str
+ description:
+ - Type of alert.
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description:
+ - An arbitrary string to use for aggregation.
+ validate_certs:
+ description:
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
+ type: bool
+ default: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Post an event with low priority
community.general.datadog_event:
title: Testing from ansible
@@ -116,8 +123,7 @@ EXAMPLES = '''
- aa
- b
- '#host:{{ inventory_hostname }}'
-
-'''
+"""
import platform
import traceback
diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py
index 75ae8c2332..f778d2444d 100644
--- a/plugins/modules/datadog_monitor.py
+++ b/plugins/modules/datadog_monitor.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: datadog_monitor
short_description: Manages Datadog monitors
description:
@@ -21,181 +20,181 @@ requirements: [datadog]
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- api_key:
- description:
- - Your Datadog API key.
- required: true
- type: str
- api_host:
- description:
- - The URL to the Datadog API. Default value is V(https://api.datadoghq.com).
- - This value can also be set with the E(DATADOG_HOST) environment variable.
- required: false
- type: str
- version_added: '0.2.0'
- app_key:
- description:
- - Your Datadog app key.
- required: true
- type: str
- state:
- description:
- - The designated state of the monitor.
- required: true
- choices: ['present', 'absent', 'mute', 'unmute']
- type: str
- tags:
- description:
- - A list of tags to associate with your monitor when creating or updating.
- - This can help you categorize and filter monitors.
- type: list
- elements: str
- type:
- description:
- - The type of the monitor.
- - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0.
- - The type V(composite) was added in community.general 3.4.0.
- - The type V(event-v2 alert) was added in community.general 4.8.0.
- choices:
- - metric alert
- - service check
- - event alert
- - event-v2 alert
- - process alert
- - log alert
- - query alert
- - trace-analytics alert
- - rum alert
- - composite
- type: str
- query:
- description:
- - The monitor query to notify on.
- - Syntax varies depending on what type of monitor you are creating.
- type: str
- name:
- description:
- - The name of the alert.
- required: true
- type: str
- notification_message:
- description:
- - A message to include with notifications for this monitor.
- - Email notifications can be sent to specific users by using the same '@username' notation as events.
- - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'.
- type: str
- silenced:
- type: dict
- description:
- - Dictionary of scopes to silence, with timestamps or None.
- - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
- notify_no_data:
- description:
- - Whether this monitor will notify when data stops reporting.
- type: bool
- default: false
- no_data_timeframe:
- description:
- - The number of minutes before a monitor will notify when data stops reporting.
- - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
- - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
- type: str
- timeout_h:
- description:
- - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
- type: str
- renotify_interval:
- description:
- - The number of minutes after the last notification before a monitor will re-notify on the current status.
- - It will only re-notify if it is not resolved.
- type: str
- escalation_message:
- description:
- - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
- - Not applicable if O(renotify_interval=none).
- type: str
- notify_audit:
- description:
- - Whether tagged users will be notified on changes to this monitor.
- type: bool
- default: false
- thresholds:
- type: dict
- description:
- - A dictionary of thresholds by status.
- - Only available for service checks and metric alerts.
- - Because each of them can have multiple thresholds, we do not define them directly in the query.
- - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})."
- locked:
- description:
- - Whether changes to this monitor should be restricted to the creator or admins.
- type: bool
- default: false
- require_full_window:
- description:
- - Whether this monitor needs a full window of data before it gets evaluated.
- - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
- type: bool
- new_host_delay:
- description:
- - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
- - This gives the host time to fully initialize.
- type: str
- evaluation_delay:
- description:
- - Time to delay evaluation (in seconds).
- - Effective for sparse values.
- type: str
- id:
- description:
- - The ID of the alert.
- - If set, will be used instead of the name to locate the alert.
- type: str
- include_tags:
- description:
- - Whether notifications from this monitor automatically inserts its triggering tags into the title.
- type: bool
- default: true
- version_added: 1.3.0
- priority:
- description:
- - Integer from 1 (high) to 5 (low) indicating alert severity.
- type: int
- version_added: 4.6.0
- notification_preset_name:
- description:
- - Toggles the display of additional content sent in the monitor notification.
- choices:
- - show_all
- - hide_query
- - hide_handles
- - hide_all
- type: str
- version_added: 7.1.0
- renotify_occurrences:
- description:
- - The number of times re-notification messages should be sent on the current status at the provided re-notification interval.
- type: int
- version_added: 7.1.0
- renotify_statuses:
- description:
- - The types of monitor statuses for which re-notification messages are sent.
- choices:
- - alert
- - warn
- - no data
- type: list
- elements: str
- version_added: 7.1.0
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is V(https://api.datadoghq.com).
+ - This value can also be set with the E(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ elements: str
+ type:
+ description:
+ - The type of the monitor.
+ - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0.
+ - The type V(composite) was added in community.general 3.4.0.
+ - The type V(event-v2 alert) was added in community.general 4.8.0.
+ choices:
+ - metric alert
+ - service check
+ - event alert
+ - event-v2 alert
+ - process alert
+ - log alert
+ - query alert
+ - trace-analytics alert
+ - rum alert
+ - composite
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+ - Monitor message template variables can be accessed by using double square brackets, in other words C([[) and C(]]).
+ type: str
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope is muted until the given POSIX timestamp or forever if the value is V(None).
+ notify_no_data:
+ description:
+ - Whether this monitor notifies when data stops reporting.
+ type: bool
+ default: false
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor notifies when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor re-notifies on the current status.
+ - It only re-notifies if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
+ - Not applicable if O(renotify_interval=none).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users are notified on changes to this monitor.
+ type: bool
+ default: false
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: false
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+ - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+ - If set, it is used instead of O(name) to locate the alert.
+ type: str
+ include_tags:
+ description:
+ - Whether notifications from this monitor automatically inserts its triggering tags into the title.
+ type: bool
+ default: true
+ version_added: 1.3.0
+ priority:
+ description:
+ - Integer from V(1) (high) to V(5) (low) indicating alert severity.
+ type: int
+ version_added: 4.6.0
+ notification_preset_name:
+ description:
+ - Toggles the display of additional content sent in the monitor notification.
+ choices:
+ - show_all
+ - hide_query
+ - hide_handles
+ - hide_all
+ type: str
+ version_added: 7.1.0
+ renotify_occurrences:
+ description:
+ - The number of times re-notification messages should be sent on the current status at the provided re-notification
+ interval.
+ type: int
+ version_added: 7.1.0
+ renotify_statuses:
+ description:
+ - The types of monitor statuses for which re-notification messages are sent.
+ choices:
+ - alert
+ - warn
+ - no data
+ type: list
+ elements: str
+ version_added: 7.1.0
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a metric monitor
community.general.datadog_monitor:
type: "metric alert"
@@ -239,7 +238,8 @@ EXAMPLES = '''
api_host: https://api.datadoghq.eu
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
-'''
+"""
+
import traceback
# Import Datadog
@@ -275,14 +275,14 @@ def main():
renotify_interval=dict(),
escalation_message=dict(),
notify_audit=dict(default=False, type='bool'),
- thresholds=dict(type='dict', default=None),
- tags=dict(type='list', elements='str', default=None),
+ thresholds=dict(type='dict'),
+ tags=dict(type='list', elements='str'),
locked=dict(default=False, type='bool'),
require_full_window=dict(type='bool'),
new_host_delay=dict(),
evaluation_delay=dict(),
id=dict(),
- include_tags=dict(required=False, default=True, type='bool'),
+ include_tags=dict(default=True, type='bool'),
priority=dict(type='int'),
notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']),
renotify_occurrences=dict(type='int'),
@@ -435,7 +435,7 @@ def mute_monitor(module):
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
- elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0:
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py
index 065cf1a6a7..762c443130 100644
--- a/plugins/modules/dconf.py
+++ b/plugins/modules/dconf.py
@@ -9,53 +9,39 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: dconf
author:
- - "Branko Majic (@azaghal)"
+ - "Branko Majic (@azaghal)"
short_description: Modify and read dconf database
description:
- - This module allows modifications and reading of C(dconf) database. The module
- is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man
- page for more details.
- - Since C(dconf) requires a running D-Bus session to change values, the module
- will try to detect an existing session and reuse it, or run the tool via
- C(dbus-run-session).
+ - This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf)
+ tool. Please see the dconf(1) man page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse
+ it, or run the tool using C(dbus-run-session).
requirements:
- - Optionally the C(gi.repository) Python library (usually included in the OS
- on hosts which have C(dconf)); this will become a non-optional requirement
- in a future major release of community.general.
+ - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become
+ a non-optional requirement in a future major release of community.general.
notes:
- - This module depends on C(psutil) Python library (version 4.0.0 and upwards),
- C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
- distribution you are using, you may need to install additional packages to
- have these available.
- - This module uses the C(gi.repository) Python library when available for
- accurate comparison of values in C(dconf) to values specified in Ansible
- code. C(gi.repository) is likely to be present on most systems which have
- C(dconf) but may not be present everywhere. When it is missing, a simple
- string comparison between values is used, and there may be false positives,
- that is, Ansible may think that a value is being changed when it is not.
- This fallback will be removed in a future version of this module, at which
- point the module will stop working on hosts without C(gi.repository).
- - Detection of existing, running D-Bus session, required to change settings
- via C(dconf), is not 100% reliable due to implementation details of D-Bus
- daemon itself. This might lead to running applications not picking-up
- changes on the fly if options are changed via Ansible and
- C(dbus-run-session).
- - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
- utilises an unusual syntax for the values (GVariant). For example, if you
- wanted to provide a string value, the correct syntax would be
- O(value="'myvalue'") - with single quotes as part of the Ansible parameter
- value.
- - When using loops in combination with a value like
- V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible
- type conversions. Applying a filter V({{ item.value | string }})
- to the parameter variable can avoid potential conversion problems.
- - The easiest way to figure out exact syntax/value you need to provide for a
- key is by making the configuration change in application affected by the
- key, and then having a look at value set via commands C(dconf dump
- /path/to/dir/) or C(dconf read /path/to/key).
+ - This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session)
+ binaries. Depending on the distribution you are using, you may need to install additional packages to have these available.
+ - This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values
+ specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be
+ present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives,
+ that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version
+ of this module, at which point the module will stop working on hosts without C(gi.repository).
+ - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to
+ implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly
+ if options are changed using Ansible and C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around, utilises an unusual syntax for the values (GVariant).
+ For example, if you wanted to provide a string value, the correct syntax would be O(value="'myvalue'") - with single quotes
+ as part of the Ansible parameter value.
+ - When using loops in combination with a value like V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible
+ type conversions. Applying a filter V({{ item.value | string }}) to the parameter variable can avoid potential conversion
+ problems.
+ - The easiest way to figure out exact syntax/value you need to provide for a key is by making the configuration change in
+ application affected by the key, and then having a look at value set using commands C(dconf dump /path/to/dir/) or C(dconf
+ read /path/to/key).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -73,30 +59,27 @@ options:
type: raw
required: false
description:
- - Value to set for the specified dconf key. Value should be specified in
- GVariant format. Due to complexity of this format, it is best to have a
- look at existing values in the dconf database.
+ - Value to set for the specified dconf key. Value should be specified in GVariant format. Due to complexity of this
+ format, it is best to have a look at existing values in the dconf database.
- Required for O(state=present).
- - Although the type is specified as "raw", it should typically be
- specified as a string. However, boolean values in particular are
- handled properly even when specified as booleans rather than strings
- (in fact, handling booleans properly is why the type of this parameter
- is "raw").
+ - Although the type is specified as "raw", it should typically be specified as a string. However, boolean values in
+ particular are handled properly even when specified as booleans rather than strings (in fact, handling booleans properly
+ is why the type of this parameter is "raw").
state:
type: str
required: false
default: present
- choices: [ 'read', 'present', 'absent' ]
+ choices: ['read', 'present', 'absent']
description:
- The action to take upon the key/value.
-'''
+"""
RETURN = r"""
value:
- description: value associated with the requested key
- returned: success, state was "read"
- type: str
- sample: "'Default'"
+ description: Value associated with the requested key.
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
"""
EXAMPLES = r"""
@@ -415,7 +398,7 @@ def main():
state=dict(default='present', choices=['present', 'absent', 'read']),
key=dict(required=True, type='str', no_log=False),
# Converted to str below after special handling of bool.
- value=dict(required=False, default=None, type='raw'),
+ value=dict(type='raw'),
),
supports_check_mode=True,
required_if=[
diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py
new file mode 100644
index 0000000000..03be61a8e6
--- /dev/null
+++ b/plugins/modules/decompress.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Stanislav Shamilov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: decompress
+short_description: Decompresses compressed files
+version_added: 10.1.0
+description:
+ - Decompresses compressed files.
+ - The source (compressed) file and destination (decompressed) files are on the remote host.
+ - Source file can be deleted after decompression.
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ src:
+ description:
+ - Remote absolute path for the file to decompress.
+ type: path
+ required: true
+ dest:
+ description:
+ - The file name of the destination file where the compressed file is decompressed.
+ - If the destination file exists, it is truncated and overwritten.
+ - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For
+ example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src)
+ file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed)
+ to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the O(dest) filename is V(/path/to/file.myextension_decompressed).
+ type: path
+ format:
+ description:
+ - The type of compression to use to decompress.
+ type: str
+ choices: [gz, bz2, xz]
+ default: gz
+ remove:
+ description:
+ - Remove original compressed file after decompression.
+ type: bool
+ default: false
+requirements:
+ - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python
+ 2) if using C(xz) format.
+author:
+ - Stanislav Shamilov (@shamilovstas)
+"""
+
+EXAMPLES = r"""
+- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt (gz compression is used by default)
+ community.general.decompress:
+ src: /path/to/file.txt.gz
+ dest: /path/to/file.txt
+
+- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt
+ community.general.decompress:
+ src: /path/to/file.txt.gz
+
+- name: Decompress file compressed with bzip2
+ community.general.decompress:
+ src: /path/to/file.txt.bz2
+ dest: /path/to/file.bz2
+ format: bz2
+
+- name: Decompress file and delete the compressed file afterwards
+ community.general.decompress:
+ src: /path/to/file.txt.gz
+ dest: /path/to/file.txt
+ remove: true
+"""
+
+RETURN = r"""
+dest:
+ description: Path to decompressed file.
+ type: str
+ returned: success
+ sample: /path/to/file.txt
+"""
+
+import bz2
+import filecmp
+import gzip
+import os
+import shutil
+import tempfile
+
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ModuleHelper
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils import deps
+
+with deps.declare("lzma"):
+ if six.PY3:
+ import lzma
+ else:
+ from backports import lzma
+
+
+def lzma_decompress(src):
+ return lzma.open(src, "rb")
+
+
+def bz2_decompress(src):
+ if six.PY3:
+ return bz2.open(src, "rb")
+ else:
+ return bz2.BZ2File(src, "rb")
+
+
+def gzip_decompress(src):
+ return gzip.open(src, "rb")
+
+
+def decompress(b_src, b_dest, handler):
+ with handler(b_src) as src_file:
+ with open(b_dest, "wb") as dest_file:
+ shutil.copyfileobj(src_file, dest_file)
+
+
+class Decompress(ModuleHelper):
+ destination_filename_template = "%s_decompressed"
+ output_params = 'dest'
+
+ module = dict(
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ dest=dict(type='path'),
+ format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']),
+ remove=dict(type='bool', default=False)
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ def __init_module__(self):
+ self.handlers = {"gz": gzip_decompress, "bz2": bz2_decompress, "xz": lzma_decompress}
+ if self.vars.dest is None:
+ self.vars.dest = self.get_destination_filename()
+ deps.validate(self.module)
+ self.configure()
+
+ def configure(self):
+ b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict')
+ b_src = to_bytes(self.vars.src, errors='surrogate_or_strict')
+ if not os.path.exists(b_src):
+ if self.vars.remove and os.path.exists(b_dest):
+ self.module.exit_json(changed=False)
+ else:
+ self.do_raise(msg="Path does not exist: '%s'" % b_src)
+ if os.path.isdir(b_src):
+ self.do_raise(msg="Cannot decompress directory '%s'" % b_src)
+ if os.path.isdir(b_dest):
+ self.do_raise(msg="Destination is a directory, cannot decompress: '%s'" % b_dest)
+
+ def __run__(self):
+ b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict')
+ b_src = to_bytes(self.vars.src, errors='surrogate_or_strict')
+
+ file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest)
+ handler = self.handlers[self.vars.format]
+ try:
+ tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir)
+ self.module.add_cleanup_file(temppath)
+ b_temppath = to_bytes(temppath, errors='surrogate_or_strict')
+ decompress(b_src, b_temppath, handler)
+ except OSError as e:
+ self.do_raise(msg="Unable to create temporary file '%s'" % to_native(e))
+
+ if os.path.exists(b_dest):
+ self.changed = not filecmp.cmp(b_temppath, b_dest, shallow=False)
+ else:
+ self.changed = True
+
+ if self.changed and not self.module.check_mode:
+ try:
+ self.module.atomic_move(b_temppath, b_dest)
+ except OSError:
+ self.do_raise(msg="Unable to move temporary file '%s' to '%s'" % (b_temppath, self.vars.dest))
+
+ if self.vars.remove and not self.check_mode:
+ os.remove(b_src)
+ self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed)
+
+ def get_destination_filename(self):
+ src = self.vars.src
+ fmt_extension = ".%s" % self.vars.format
+ if src.endswith(fmt_extension) and len(src) > len(fmt_extension):
+ filename = src[:-len(fmt_extension)]
+ else:
+ filename = Decompress.destination_filename_template % src
+ return filename
+
+
+def main():
+ Decompress.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py
index b47ed82540..b25e68392b 100644
--- a/plugins/modules/deploy_helper.py
+++ b/plugins/modules/deploy_helper.py
@@ -11,27 +11,19 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: deploy_helper
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects
description:
- - The Deploy Helper manages some of the steps common in deploying software.
- It creates a folder structure, manages a symlink for the current release
- and cleans up old releases.
- # TODO: convert below to RETURN documentation!
- - "Running it with the O(state=query) or O(state=present) will return the C(deploy_helper) fact.
- C(project_path), whatever you set in the O(path) parameter,
- C(current_path), the path to the symlink that points to the active release,
- C(releases_path), the path to the folder to keep releases in,
- C(shared_path), the path to the folder to keep shared resources in,
- C(unfinished_filename), the file to check for to recognize unfinished builds,
- C(previous_release), the release the 'current' symlink is pointing to,
- C(previous_release_path), the full path to the 'current' symlink target,
- C(new_release), either the 'release' parameter or a generated timestamp,
- C(new_release_path), the path to the new release folder (not created by the module)."
-
+ - The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink
+ for the current release and cleans up old releases.
+ - Running it with the O(state=query) or O(state=present) returns the C(deploy_helper) fact. C(project_path), whatever you
+ set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path),
+ the path to the folder to keep releases in, C(shared_path), the path to the folder to keep shared resources in, C(unfinished_filename),
+ the file to check for to recognize unfinished builds, C(previous_release), the release the 'current' symlink is pointing
+ to, C(previous_release_path), the full path to the 'current' symlink target, C(new_release), either the O(release) parameter
+ or a generated timestamp, C(new_release_path), the path to the new release folder (not created by the module).
attributes:
check_mode:
support: full
@@ -44,42 +36,38 @@ options:
required: true
aliases: ['dest']
description:
- - The root path of the project.
- Returned in the C(deploy_helper.project_path) fact.
-
+ - The root path of the project. Returned in the C(deploy_helper.project_path) fact.
state:
type: str
description:
- The state of the project.
- - V(query) will only gather facts.
- - V(present) will create the project C(root) folder, and in it the C(releases) and C(shared) folders.
- - V(finalize) will remove the unfinished_filename file, create a symlink to the newly
- deployed release and optionally clean old releases.
- - V(clean) will remove failed & old releases.
- - V(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
- choices: [ present, finalize, absent, clean, query ]
+ - V(query) gathers facts.
+ - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders.
+ - V(finalize) removes the unfinished_filename file, creates a symlink to the newly deployed release and optionally cleans
+ old releases.
+ - V(clean) removes failed & old releases.
+ - V(absent) removes the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
+ choices: [present, finalize, absent, clean, query]
default: present
release:
type: str
description:
- The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)).
- This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize).
- You can use the generated fact C(release={{ deploy_helper.new_release }}).
-
+ This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize). You can
+ use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
type: str
description:
- - The name of the folder that will hold the releases. This can be relative to O(path) or absolute.
- Returned in the C(deploy_helper.releases_path) fact.
+ - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
+ fact.
default: releases
shared_path:
type: path
description:
- - The name of the folder that will hold the shared resources. This can be relative to O(path) or absolute.
- If this is set to an empty string, no shared folder will be created.
- Returned in the C(deploy_helper.shared_path) fact.
+ - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set
+ to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
@@ -92,9 +80,9 @@ options:
unfinished_filename:
type: str
description:
- - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that
- contain this file will be deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is
- automatically deleted from the C(new_release_path) during O(state=finalize).
+ - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this
+ file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from
+ the C(new_release_path) during O(state=finalize).
default: DEPLOY_UNFINISHED
clean:
@@ -107,24 +95,22 @@ options:
type: int
description:
- The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds
- will be deleted first, so only correct releases will count. The current version will not count.
+ are deleted first, so only correct releases count. The current version does not count.
default: 5
notes:
- - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden
- parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
- new naming strategy without problems.
- - Because of the default behaviour of generating the C(new_release) fact, this module will not be idempotent
- unless you pass your own release name with O(release). Due to the nature of deploying software, this should not
- be much of a problem.
+ - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters
+ to both calls, otherwise the second call overwrites the facts of the first one.
+ - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming
+ strategy without problems.
+ - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass
+ your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem.
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
-'''
-
-EXAMPLES = '''
+"""
+EXAMPLES = r"""
# General explanation, starting with an example folder structure for a project:
# root:
@@ -192,10 +178,10 @@ EXAMPLES = '''
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- - path: app/sessions
- src: sessions
- - path: web/uploads
- src: uploads
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
community.general.deploy_helper:
path: /path/to/root
@@ -277,7 +263,8 @@ EXAMPLES = '''
path: /path/to/root
- ansible.builtin.debug:
var: deploy_helper
-'''
+"""
+
import os
import shutil
import time
diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py
index cfb7d61cd9..04fff21e58 100644
--- a/plugins/modules/dimensiondata_network.py
+++ b/plugins/modules/dimensiondata_network.py
@@ -14,8 +14,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: dimensiondata_network
short_description: Create, update, and delete MCP 1.0 & 2.0 networks
extends_documentation_fragment:
@@ -24,7 +23,7 @@ extends_documentation_fragment:
- community.general.attributes
description:
- - Create, update, and delete MCP 1.0 & 2.0 networks
+ - Create, update, and delete MCP 1.0 & 2.0 networks.
author: 'Aimon Bustardo (@aimonb)'
attributes:
check_mode:
@@ -55,9 +54,9 @@ options:
choices: [present, absent]
default: present
type: str
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create an MCP 1.0 network
community.general.dimensiondata_network:
region: na
@@ -79,43 +78,43 @@ EXAMPLES = '''
location: NA1
name: mynet
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
network:
- description: Dictionary describing the network.
- returned: On success when O(state=present).
- type: complex
- contains:
- id:
- description: Network ID.
- type: str
- sample: "8c787000-a000-4050-a215-280893411a7d"
- name:
- description: Network name.
- type: str
- sample: "My network"
- description:
- description: Network description.
- type: str
- sample: "My network description"
- location:
- description: Datacenter location.
- type: str
- sample: NA3
- status:
- description: Network status. (MCP 2.0 only)
- type: str
- sample: NORMAL
- private_net:
- description: Private network subnet. (MCP 1.0 only)
- type: str
- sample: "10.2.3.0"
- multicast:
- description: Multicast enabled? (MCP 1.0 only)
- type: bool
- sample: false
-'''
+ description: Dictionary describing the network.
+ returned: On success when O(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only).
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only).
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only).
+ type: bool
+ sample: false
+"""
import traceback
from ansible.module_utils.basic import AnsibleModule
@@ -141,7 +140,7 @@ class DimensionDataNetworkModule(DimensionDataModule):
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(type='str', required=True),
- description=dict(type='str', required=False),
+ description=dict(type='str'),
service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
state=dict(default='present', choices=['present', 'absent'])
),
diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py
index 9d129f3dea..b28b12d998 100644
--- a/plugins/modules/dimensiondata_vlan.py
+++ b/plugins/modules/dimensiondata_vlan.py
@@ -10,8 +10,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain
extends_documentation_fragment:
@@ -40,37 +39,38 @@ options:
default: ''
network_domain:
description:
- - The Id or name of the target network domain.
+ - The ID or name of the target network domain.
required: true
type: str
private_ipv4_base_address:
description:
- - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ - The base address for the VLAN's IPv4 network (for example V(192.168.1.0)).
type: str
default: ''
private_ipv4_prefix_size:
description:
- - The size of the IPv4 address space, e.g 24.
- - Required, if O(private_ipv4_base_address) is specified.
+ - The size of the IPv4 address space, for example V(24).
+ - Required, if O(private_ipv4_base_address) is specified.
type: int
default: 0
state:
description:
- The desired state for the target VLAN.
- - V(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist).
choices: [present, absent, readonly]
default: present
type: str
allow_expand:
description:
- - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- - If V(false), the module will fail under these conditions.
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently
+ possesses.
+ - If V(false), the module fails under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: false
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add or update VLAN
community.general.dimensiondata_vlan:
region: na
@@ -100,59 +100,59 @@ EXAMPLES = '''
name: my_vlan_1
state: absent
wait: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
vlan:
- description: Dictionary describing the VLAN.
- returned: On success when O(state=present)
- type: complex
- contains:
- id:
- description: VLAN ID.
- type: str
- sample: "aaaaa000-a000-4050-a215-2808934ccccc"
- name:
- description: VLAN name.
- type: str
- sample: "My VLAN"
- description:
- description: VLAN description.
- type: str
- sample: "My VLAN description"
- location:
- description: Datacenter location.
- type: str
- sample: NA3
- private_ipv4_base_address:
- description: The base address for the VLAN's private IPV4 network.
- type: str
- sample: 192.168.23.0
- private_ipv4_prefix_size:
- description: The prefix size for the VLAN's private IPV4 network.
- type: int
- sample: 24
- private_ipv4_gateway_address:
- description: The gateway address for the VLAN's private IPV4 network.
- type: str
- sample: 192.168.23.1
- private_ipv6_base_address:
- description: The base address for the VLAN's IPV6 network.
- type: str
- sample: 2402:9900:111:1195:0:0:0:0
- private_ipv6_prefix_size:
- description: The prefix size for the VLAN's IPV6 network.
- type: int
- sample: 64
- private_ipv6_gateway_address:
- description: The gateway address for the VLAN's IPV6 network.
- type: str
- sample: 2402:9900:111:1195:0:0:0:1
- status:
- description: VLAN status.
- type: str
- sample: NORMAL
-'''
+ description: Dictionary describing the VLAN.
+ returned: On success when O(state=present)
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
@@ -186,7 +186,7 @@ class DimensionDataVlanModule(DimensionDataModule):
network_domain=dict(required=True, type='str'),
private_ipv4_base_address=dict(default='', type='str'),
private_ipv4_prefix_size=dict(default=0, type='int'),
- allow_expand=dict(required=False, default=False, type='bool'),
+ allow_expand=dict(default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent', 'readonly'])
),
required_together=DimensionDataModule.required_together()
diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py
index 130649f076..7cf05da0c1 100644
--- a/plugins/modules/discord.py
+++ b/plugins/modules/discord.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: discord
short_description: Send Discord messages
version_added: 3.1.0
@@ -18,7 +17,7 @@ description:
author: Christian Wollinger (@cwollinger)
seealso:
- name: API documentation
- description: Documentation for Discord API
+ description: Documentation for Discord API.
link: https://discord.com/developers/docs/resources/webhook#execute-webhook
extends_documentation_fragment:
- community.general.attributes
@@ -31,13 +30,13 @@ options:
webhook_id:
description:
- The webhook ID.
- - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
+ - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).'
required: true
type: str
webhook_token:
description:
- The webhook token.
- - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
+ - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).'
required: true
type: str
content:
@@ -62,13 +61,13 @@ options:
description:
- Send messages as Embeds to the Discord channel.
- Embeds can have a colored border, embedded images, text fields and more.
- - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
+ - 'Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object).'
- At least one of O(content) and O(embeds) must be specified.
type: list
elements: dict
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Send a message to the Discord channel
community.general.discord:
webhook_id: "00000"
@@ -119,7 +118,7 @@ EXAMPLES = """
timestamp: "{{ ansible_date_time.iso8601 }}"
"""
-RETURN = """
+RETURN = r"""
http_code:
description:
- Response Code returned by Discord API.
diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py
index 1553da7a30..e6e03c8276 100644
--- a/plugins/modules/django_check.py
+++ b/plugins/modules/django_check.py
@@ -7,7 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: django_check
author:
- Alexei Znamensky (@russoz)
@@ -22,7 +22,7 @@ options:
database:
description:
- Specify databases to run checks against.
- - If not specified, Django will not run database tests.
+ - If not specified, Django does not run database tests.
type: list
elements: str
deploy:
@@ -32,7 +32,7 @@ options:
default: false
fail_level:
description:
- - Message level that will trigger failure.
+ - Message level that triggers failure.
- Default is the Django default value. Check the documentation for the version being used.
type: str
choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG]
@@ -49,7 +49,7 @@ options:
elements: str
notes:
- The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
- - The module will fail if RV(ignore:rc) is not zero.
+ - The module fails if RV(ignore:rc) is not zero.
attributes:
check_mode:
support: full
@@ -57,7 +57,7 @@ attributes:
support: none
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Check the entire project
community.general.django_check:
settings: myproject.settings
@@ -72,11 +72,17 @@ EXAMPLES = """
venv: /home/joedoe/project/fancysite/venv
"""
-RETURN = """
+RETURN = r"""
run_info:
description: Command-line execution information.
type: dict
returned: success and C(verbosity) >= 3
+version:
+ description: Version of Django.
+ type: str
+ returned: always
+ sample: 5.1.2
+ version_added: 10.0.0
"""
from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py
index dcb8d26313..72cffb5c9c 100644
--- a/plugins/modules/django_command.py
+++ b/plugins/modules/django_command.py
@@ -7,7 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: django_command
author:
- Alexei Znamensky (@russoz)
@@ -36,7 +36,7 @@ options:
- List of extra arguments passed to the django admin command.
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Check the project
community.general.django_command:
command: check
@@ -50,11 +50,17 @@ EXAMPLES = """
venv: /home/joedoe/project/fancysite/venv
"""
-RETURN = """
+RETURN = r"""
run_info:
description: Command-line execution information.
type: dict
returned: success and O(verbosity) >= 3
+version:
+ description: Version of Django.
+ type: str
+ returned: always
+ sample: 5.1.2
+ version_added: 10.0.0
"""
import shlex
diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py
index b038e0358f..4d849624a9 100644
--- a/plugins/modules/django_createcachetable.py
+++ b/plugins/modules/django_createcachetable.py
@@ -7,7 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
module: django_createcachetable
author:
- Alexei Znamensky (@russoz)
@@ -26,7 +26,7 @@ attributes:
support: none
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create cache table in the default database
community.general.django_createcachetable:
settings: myproject.settings
@@ -39,11 +39,17 @@ EXAMPLES = """
venv: /home/joedoe/project/fancysite/venv
"""
-RETURN = """
+RETURN = r"""
run_info:
description: Command-line execution information.
type: dict
returned: success and O(verbosity) >= 3
+version:
+ description: Version of Django.
+ type: str
+ returned: always
+ sample: 5.1.2
+ version_added: 10.0.0
"""
from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py
index 352bfe4b50..0fe07890f8 100644
--- a/plugins/modules/django_manage.py
+++ b/plugins/modules/django_manage.py
@@ -10,13 +10,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: django_manage
short_description: Manages a Django application
description:
- - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
- O(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter,
+ all management commands are executed by the given C(virtualenv) installation.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -27,17 +26,18 @@ attributes:
options:
command:
description:
- - The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation.
+ - The name of the Django management command to run. The commands listed below are built in this module and have some
+ basic parameter validation.
- V(collectstatic) - Collects the static files into C(STATIC_ROOT).
- V(createcachetable) - Creates the cache tables for use with the database cache backend.
- V(flush) - Removes all data from the database.
- V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
- V(migrate) - Synchronizes the database state with models and migrations.
- V(test) - Runs tests for all installed apps.
- - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may
- prompt for user input should be run with the C(--noinput) flag.
- - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0.
- See note about supported versions of Django.
+ - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for
+ user input should be run with the C(--noinput) flag.
+ - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported
+ versions of Django.
type: str
required: true
project_path:
@@ -53,8 +53,8 @@ options:
required: false
pythonpath:
description:
- - A directory to add to the Python path. Typically used to include the settings module if it is located
- external to the application directory.
+ - A directory to add to the Python path. Typically used to include the settings module if it is located external to
+ the application directory.
- This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable.
type: path
required: false
@@ -62,7 +62,7 @@ options:
virtualenv:
description:
- An optional path to a C(virtualenv) installation to use while running the manage application.
- - The virtual environment must exist, otherwise the module will fail.
+ - The virtual environment must exist, otherwise the module fails.
type: path
aliases: [virtual_env]
apps:
@@ -78,14 +78,13 @@ options:
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- - Used only with the V(collectstatic) command. The C(--noinput) argument will be added automatically.
+ - Used only with the V(collectstatic) command. The C(--noinput) argument is added automatically.
required: false
default: false
type: bool
database:
description:
- - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb),
- and V(migrate) commands.
+ - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb), and V(migrate) commands.
type: str
required: false
failfast:
@@ -102,19 +101,18 @@ options:
required: false
skip:
description:
- - Will skip over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
+ - Skips over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
required: false
type: bool
merge:
description:
- - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this
- parameter with V(migrate) command.
+ - Runs out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with
+ V(migrate) command.
required: false
type: bool
link:
description:
- - Will create links to the files instead of copying them, you can only use this parameter with
- V(collectstatic) command.
+ - Creates links to the files instead of copying them, you can only use this parameter with V(collectstatic) command.
required: false
type: bool
testrunner:
@@ -124,28 +122,19 @@ options:
type: str
required: false
aliases: [test_runner]
- ack_venv_creation_deprecation:
- description:
- - This option no longer has any effect since community.general 9.0.0.
- - It will be removed from community.general 11.0.0.
- type: bool
- version_added: 5.8.0
notes:
- - >
- B(ATTENTION): Support for Django releases older than 4.1 has been removed in
- community.general version 9.0.0. While the module allows for free-form commands
- does not verify the version of Django being used, it is B(strongly recommended)
- to use a more recent version of Django.
+ - 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0. While the
+ module allows for free-form commands and does not verify the version of Django being used, it is B(strongly recommended) to
+ use a more recent version of the framework.'
- Please notice that Django 4.1 requires Python 3.8 or greater.
- - This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment
- does not already exist at the given location. This behavior changed in community.general version 9.0.0.
+ - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
+ already exist at the given location. This behavior changed in community.general version 9.0.0.
- The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
- - This module assumes English error messages for the V(createcachetable) command to detect table existence,
- unfortunately.
+ - This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately.
- To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings.
- - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang,
- for example C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+ - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang, for example C(#!/usr/bin/env
+ python), for invoking the appropriate Python interpreter.
seealso:
- name: django-admin and manage.py Reference
description: Reference for C(django-admin) or C(manage.py) commands.
@@ -156,13 +145,13 @@ seealso:
- name: What Python version can I use with Django?
description: From the Django FAQ, the response to Python requirements for the framework.
link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django
-requirements: [ "django >= 4.1" ]
+requirements: ["django >= 4.1"]
author:
- Alexei Znamensky (@russoz)
- Scott Anderson (@tastychutney)
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Run cleanup on the application installed in django_dir
community.general.django_manage:
command: clearsessions
@@ -296,7 +285,6 @@ def main():
skip=dict(type='bool'),
merge=dict(type='bool'),
link=dict(type='bool'),
- ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'),
),
)
diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py
index b8175c3053..eb64bee864 100644
--- a/plugins/modules/dnf_config_manager.py
+++ b/plugins/modules/dnf_config_manager.py
@@ -7,8 +7,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: dnf_config_manager
short_description: Enable or disable dnf repositories using config-manager
version_added: 8.2.0
@@ -45,9 +44,9 @@ notes:
seealso:
- module: ansible.builtin.dnf
- module: ansible.builtin.yum_repository
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure the crb repository is enabled
community.general.dnf_config_manager:
name: crb
@@ -59,9 +58,9 @@ EXAMPLES = r'''
- appstream
- zfs
state: disabled
-'''
+"""
-RETURN = r'''
+RETURN = r"""
repo_states_pre:
description: Repo IDs before action taken.
returned: success
@@ -117,12 +116,12 @@ repo_states_post:
- crb-debug
- crb-source
changed_repos:
- description: Repositories changed.
- returned: success
- type: list
- elements: str
- sample: [ 'crb' ]
-'''
+ description: Repositories changed.
+ returned: success
+ type: list
+ elements: str
+ sample: ["crb"]
+"""
from ansible.module_utils.basic import AnsibleModule
import os
@@ -176,8 +175,8 @@ def pack_repo_states_for_return(states):
def main():
module_args = dict(
- name=dict(type='list', elements='str', required=False, default=[]),
- state=dict(type='str', required=False, choices=['enabled', 'disabled'], default='enabled')
+ name=dict(type='list', elements='str', default=[]),
+ state=dict(type='str', choices=['enabled', 'disabled'], default='enabled')
)
result = dict(
diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py
index 11d446f221..b3e2e2bcc9 100644
--- a/plugins/modules/dnf_versionlock.py
+++ b/plugins/modules/dnf_versionlock.py
@@ -7,37 +7,32 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: dnf_versionlock
version_added: '4.0.0'
short_description: Locks package versions in C(dnf) based systems
description:
-- Locks package versions using the C(versionlock) plugin in C(dnf) based
- systems. This plugin takes a set of name and versions for packages and
- excludes all other versions of those packages. This allows you to for example
- protect packages from being updated by newer versions. The state of the
- plugin that reflects locking of packages is the C(locklist).
+ - Locks package versions using the C(versionlock) plugin in C(dnf) based systems. This plugin takes a set of name and versions
+ for packages and excludes all other versions of those packages. This allows you to for example protect packages from being
+ updated by newer versions. The state of the plugin that reflects locking of packages is the C(locklist).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: partial
details:
- - The logics of the C(versionlock) plugin for corner cases could be
- confusing, so please take in account that this module will do its best to
- give a C(check_mode) prediction on what is going to happen. In case of
- doubt, check the documentation of the plugin.
- - Sometimes the module could predict changes in C(check_mode) that will not
- be such because C(versionlock) concludes that there is already a entry in
- C(locklist) that already matches.
+ - The logic of the C(versionlock) plugin for corner cases could be confusing, so please take into account that this module
+ does its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation
+ of the plugin.
+ - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes
+ that there is already an entry in C(locklist) that matches.
diff_mode:
support: none
options:
name:
description:
- - Package name spec to add or exclude to or delete from the C(locklist)
- using the format expected by the C(dnf repoquery) command.
+ - Package name spec to add or exclude to or delete from the C(locklist) using the format expected by the C(dnf repoquery)
+ command.
- This parameter is mutually exclusive with O(state=clean).
type: list
required: false
@@ -45,44 +40,35 @@ options:
default: []
raw:
description:
- - Do not resolve package name specs to NEVRAs to find specific version
- to lock to. Instead the package name specs are used as they are. This
- enables locking to not yet available versions of the package.
+ - Do not resolve package name specs to NEVRAs to find a specific version to lock to. Instead, the package name specs are
+ used as they are. This enables locking to not yet available versions of the package.
type: bool
default: false
state:
description:
- - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or
- V(clean)) from the C(locklist).
- - V(present) will add a package name spec to the C(locklist). If there is a
- installed package that matches, then only that version will be added.
- Otherwise, all available package versions will be added.
- - V(excluded) will add a package name spec as excluded to the
- C(locklist). It means that packages represented by the package name
- spec will be excluded from transaction operations. All available
- package versions will be added.
- - V(absent) will delete entries in the C(locklist) that match the
- package name spec.
- - V(clean) will delete all entries in the C(locklist). This option is
- mutually exclusive with O(name).
- choices: [ 'absent', 'clean', 'excluded', 'present' ]
+ - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist).
+ - V(present) adds a package name spec to the C(locklist). If there is an installed package that matches, then only that
+ version is added. Otherwise, all available package versions are added.
+ - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package
+ name spec are excluded from transaction operations. All available package versions are added.
+ - V(absent) deletes entries in the C(locklist) that match the package name spec.
+ - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name).
+ choices: ['absent', 'clean', 'excluded', 'present']
type: str
default: present
notes:
- - In an ideal world, the C(versionlock) plugin would have a dry-run option to
- know for sure what is going to happen. So far we have to work with a best
- guess as close as possible to the behaviour inferred from its code.
- - For most of cases where you want to lock and unlock specific versions of a
- package, this works fairly well.
+ - In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So
+ far we have to work with a best guess as close as possible to the behaviour inferred from its code.
+ - For most cases where you want to lock and unlock specific versions of a package, this works fairly well.
- Does not work with C(dnf5).
requirements:
- dnf
- dnf-plugin-versionlock
author:
- Roberto Moreda (@moreda)
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Prevent installed nginx from being updated
community.general.dnf_versionlock:
name: nginx
@@ -97,12 +83,12 @@ EXAMPLES = r'''
- name: Remove lock from nginx to be updated again
community.general.dnf_versionlock:
- package: nginx
+ name: nginx
state: absent
- name: Exclude bind 32:9.11 from installs or updates
community.general.dnf_versionlock:
- package: bind-32:9.11*
+ name: bind-32:9.11*
state: excluded
- name: Keep bash package in major version 4
@@ -114,34 +100,34 @@ EXAMPLES = r'''
- name: Delete all entries in the locklist of versionlock
community.general.dnf_versionlock:
state: clean
-'''
+"""
-RETURN = r'''
+RETURN = r"""
locklist_pre:
- description: Locklist before module execution.
- returned: success
- type: list
- elements: str
- sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ]
+ description: Locklist before module execution.
+ returned: success
+ type: list
+ elements: str
+ sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"]
locklist_post:
- description: Locklist after module execution.
- returned: success and (not check mode or state is clean)
- type: list
- elements: str
- sample: [ 'bash-0:4.4.20-1.el8_4.*' ]
+ description: Locklist after module execution.
+ returned: success and (not check mode or state is clean)
+ type: list
+ elements: str
+ sample: ["bash-0:4.4.20-1.el8_4.*"]
specs_toadd:
- description: Package name specs meant to be added by versionlock.
- returned: success
- type: list
- elements: str
- sample: [ 'bash' ]
+ description: Package name specs meant to be added by versionlock.
+ returned: success
+ type: list
+ elements: str
+ sample: ["bash"]
specs_todelete:
- description: Package name specs meant to be deleted by versionlock.
- returned: success
- type: list
- elements: str
- sample: [ 'bind' ]
-'''
+ description: Package name specs meant to be deleted by versionlock.
+ returned: success
+ type: list
+ elements: str
+ sample: ["bind"]
+"""
from ansible.module_utils.basic import AnsibleModule
import fnmatch
diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py
index c5829e36eb..729c876841 100644
--- a/plugins/modules/dnsimple.py
+++ b/plugins/modules/dnsimple.py
@@ -10,12 +10,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: dnsimple
short_description: Interface with dnsimple.com (a DNS hosting service)
description:
- - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+ - 'Manages domains and records using the DNSimple API, see the docs: U(http://developer.dnsimple.com/).'
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,9 +25,9 @@ attributes:
options:
account_email:
description:
- - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) will be looked for.
- - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
- - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
+ - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for.
+ - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).'
+ - C(.dnsimple) config files are only supported in dnsimple-python<2.0.0.
type: str
account_api_token:
description:
@@ -36,13 +35,13 @@ options:
type: str
domain:
description:
- - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
- - If omitted, a list of domains will be returned.
- - If domain is present but the domain doesn't exist, it will be created.
+ - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains is returned.
+ - If domain is present but the domain does not exist, it is created.
type: str
record:
description:
+ - Record to add. If blank, a record for the domain is created. Supports the wildcard (*).
+ - Record to add, if blank a record for the domain is created, supports the wildcard (*).
type: str
record_ids:
description:
@@ -52,7 +51,23 @@ options:
type:
description:
- The type of DNS record to create.
- choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ]
+ choices:
+ - A
+ - ALIAS
+ - CNAME
+ - MX
+ - SPF
+ - URL
+ - TXT
+ - NS
+ - SRV
+ - NAPTR
+ - PTR
+ - AAAA
+ - SSHFP
+ - HINFO
+ - POOL
+ - CAA
type: str
ttl:
description:
@@ -70,8 +85,8 @@ options:
type: int
state:
description:
- - whether the record should exist or not.
- choices: [ 'present', 'absent' ]
+ - Whether the record should exist or not.
+ choices: ['present', 'absent']
default: present
type: str
solo:
@@ -91,9 +106,9 @@ options:
requirements:
- "dnsimple >= 2.0.0"
author: "Alex Coomans (@drcapulet)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Authenticate using email and API token and fetch all domains
community.general.dnsimple:
account_email: test@example.com
@@ -149,9 +164,9 @@ EXAMPLES = '''
value: example.com
state: absent
delegate_to: localhost
-'''
+"""
-RETURN = r"""# """
+RETURN = r"""#"""
import traceback
import re
diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py
index 46c2877f73..78b4ceae25 100644
--- a/plugins/modules/dnsimple_info.py
+++ b/plugins/modules/dnsimple_info.py
@@ -9,8 +9,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: dnsimple_info
short_description: Pull basic info from DNSimple API
@@ -20,45 +19,45 @@ version_added: "4.2.0"
description: Retrieve existing records and domains from DNSimple API.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.attributes
+ - community.general.attributes.info_module
options:
- name:
- description:
- - The domain name to retrieve info from.
- - Will return all associated records for this domain if specified.
- - If not specified, will return all domains associated with the account ID.
- type: str
+ name:
+ description:
+ - The domain name to retrieve info from.
+ - Returns all associated records for this domain if specified.
+ - If not specified, returns all domains associated with the account ID.
+ type: str
- account_id:
- description: The account ID to query.
- required: true
- type: str
+ account_id:
+ description: The account ID to query.
+ required: true
+ type: str
- api_key:
- description: The API key to use.
- required: true
- type: str
+ api_key:
+ description: The API key to use.
+ required: true
+ type: str
- record:
- description:
- - The record to find.
- - If specified, only this record will be returned instead of all records.
- required: false
- type: str
+ record:
+ description:
+ - The record to find.
+ - If specified, only this record is returned instead of all records.
+ required: false
+ type: str
- sandbox:
- description: Whether or not to use sandbox environment.
- required: false
- default: false
- type: bool
+ sandbox:
+ description: Whether or not to use sandbox environment.
+ required: false
+ default: false
+ type: bool
author:
- - Edward Hilgendorf (@edhilgendorf)
-'''
+ - Edward Hilgendorf (@edhilgendorf)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Get all domains from an account
community.general.dnsimple_info:
account_id: "1234"
@@ -76,15 +75,15 @@ EXAMPLES = r'''
record: "subdomain"
account_id: "1234"
api_key: "1234"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
dnsimple_domain_info:
- description: Returns a list of dictionaries of all domains associated with the supplied account ID.
- type: list
- elements: dict
- returned: success when O(name) is not specified
- sample:
+ description: Returns a list of dictionaries of all domains associated with the supplied account ID.
+ type: list
+ elements: dict
+ returned: success when O(name) is not specified
+ sample:
- account_id: 1234
created_at: '2021-10-16T21:25:42Z'
id: 123456
@@ -93,41 +92,41 @@ dnsimple_domain_info:
reverse: false
secondary: false
updated_at: '2021-11-10T20:22:50Z'
- contains:
- account_id:
- description: The account ID.
- type: int
- created_at:
- description: When the domain entry was created.
- type: str
- id:
- description: ID of the entry.
- type: int
- last_transferred_at:
- description: Date the domain was transferred, or empty if not.
- type: str
- name:
- description: Name of the record.
- type: str
- reverse:
- description: Whether or not it is a reverse zone record.
- type: bool
- updated_at:
- description: When the domain entry was updated.
- type: str
+ contains:
+ account_id:
+ description: The account ID.
+ type: int
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ last_transferred_at:
+ description: Date the domain was transferred, or empty if not.
+ type: str
+ name:
+ description: Name of the record.
+ type: str
+ reverse:
+ description: Whether or not it is a reverse zone record.
+ type: bool
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
dnsimple_records_info:
- description: Returns a list of dictionaries with all records for the domain supplied.
- type: list
- elements: dict
- returned: success when O(name) is specified, but O(record) is not
- sample:
+ description: Returns a list of dictionaries with all records for the domain supplied.
+ type: list
+ elements: dict
+ returned: success when O(name) is specified, but O(record) is not
+ sample:
- content: ns1.dnsimple.com admin.dnsimple.com
created_at: '2021-10-16T19:07:34Z'
id: 12345
name: 'catheadbiscuit'
- parent_id: null
- priority: null
+ parent_id:
+ priority:
regions:
- global
system_record: true
@@ -135,55 +134,55 @@ dnsimple_records_info:
type: SOA
updated_at: '2021-11-15T23:55:51Z'
zone_id: example.com
- contains:
- content:
- description: Content of the returned record.
- type: str
- created_at:
- description: When the domain entry was created.
- type: str
- id:
- description: ID of the entry.
- type: int
- name:
- description: Name of the record.
- type: str
- parent_id:
- description: Parent record or null.
- type: int
- priority:
- description: Priority setting of the record.
- type: str
- regions:
- description: List of regions where the record is available.
- type: list
- system_record:
- description: Whether or not it is a system record.
- type: bool
- ttl:
- description: Record TTL.
- type: int
- type:
- description: Record type.
- type: str
- updated_at:
- description: When the domain entry was updated.
- type: str
- zone_id:
- description: ID of the zone that the record is associated with.
- type: str
+ contains:
+ content:
+ description: Content of the returned record.
+ type: str
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ name:
+ description: Name of the record.
+ type: str
+ parent_id:
+ description: Parent record or null.
+ type: int
+ priority:
+ description: Priority setting of the record.
+ type: str
+ regions:
+ description: List of regions where the record is available.
+ type: list
+ system_record:
+ description: Whether or not it is a system record.
+ type: bool
+ ttl:
+ description: Record TTL.
+ type: int
+ type:
+ description: Record type.
+ type: str
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
+ zone_id:
+ description: ID of the zone that the record is associated with.
+ type: str
dnsimple_record_info:
- description: Returns a list of dictionaries that match the record supplied.
- returned: success when O(name) and O(record) are specified
- type: list
- elements: dict
- sample:
+ description: Returns a list of dictionaries that match the record supplied.
+ returned: success when O(name) and O(record) are specified
+ type: list
+ elements: dict
+ sample:
- content: 1.2.3.4
created_at: '2021-11-15T23:55:51Z'
id: 123456
name: catheadbiscuit
- parent_id: null
- priority: null
+ parent_id:
+ priority:
regions:
- global
system_record: false
@@ -191,44 +190,44 @@ dnsimple_record_info:
type: A
updated_at: '2021-11-15T23:55:51Z'
zone_id: example.com
- contains:
- content:
- description: Content of the returned record.
- type: str
- created_at:
- description: When the domain entry was created.
- type: str
- id:
- description: ID of the entry.
- type: int
- name:
- description: Name of the record.
- type: str
- parent_id:
- description: Parent record or null.
- type: int
- priority:
- description: Priority setting of the record.
- type: str
- regions:
- description: List of regions where the record is available.
- type: list
- system_record:
- description: Whether or not it is a system record.
- type: bool
- ttl:
- description: Record TTL.
- type: int
- type:
- description: Record type.
- type: str
- updated_at:
- description: When the domain entry was updated.
- type: str
- zone_id:
- description: ID of the zone that the record is associated with.
- type: str
-'''
+ contains:
+ content:
+ description: Content of the returned record.
+ type: str
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ name:
+ description: Name of the record.
+ type: str
+ parent_id:
+ description: Parent record or null.
+ type: int
+ priority:
+ description: Priority setting of the record.
+ type: str
+ regions:
+ description: List of regions where the record is available.
+ type: list
+ system_record:
+ description: Whether or not it is a system record.
+ type: bool
+ ttl:
+ description: Record TTL.
+ type: int
+ type:
+ description: Record type.
+ type: str
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
+ zone_id:
+ description: ID of the zone that the record is associated with.
+ type: str
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils import deps
diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py
index 47d9430e7b..ec17880af7 100644
--- a/plugins/modules/dnsmadeeasy.py
+++ b/plugins/modules/dnsmadeeasy.py
@@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: dnsmadeeasy
short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
description:
- - >
- Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
- monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+ - 'Manages DNS records using the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation
+ of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/).'
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -39,8 +37,8 @@ options:
domain:
description:
- - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
- resolution
+ - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made
+ Easy (for example V(839989)) for faster resolution.
required: true
type: str
@@ -52,49 +50,47 @@ options:
record_name:
description:
- - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless
- of the state argument.
+      - Record name to get/create/delete/update. If O(record_name) is not specified, all records for the domain are returned
+ in "result" regardless of the state argument.
type: str
record_type:
description:
- Record type.
- choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ choices: ['A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']
type: str
record_value:
description:
- - >
-      Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
-      SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
- - >
- If record_value is not specified; no changes will be made and the record will be returned in 'result'
- (in other words, this module can be used to fetch a record's current id, type, and ttl)
+      - 'Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV:
+        <priority> <weight> <port> <target name>, TXT: <text value>".'
+      - If O(record_value) is not specified, no changes are made and the record is returned in RV(ignore:result) (in other
+ words, this module can be used to fetch a record's current ID, type, and TTL).
type: str
record_ttl:
description:
- - record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ - Record's "Time-To-Live". Number of seconds the record remains cached in DNS servers.
default: 1800
type: int
state:
description:
- - whether the record should exist or not
+ - Whether the record should exist or not.
required: true
- choices: [ 'present', 'absent' ]
+ choices: ['present', 'absent']
type: str
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
monitor:
description:
- - If V(true), add or change the monitor. This is applicable only for A records.
+ - If V(true), add or change the monitor. This is applicable only for A records.
type: bool
default: false
@@ -132,7 +128,7 @@ options:
contactList:
description:
- - Name or id of the contact list that the monitor will notify.
+ - Name or ID of the contact list that the monitor notifies.
- The default V('') means the Account Owner.
type: str
@@ -153,7 +149,7 @@ options:
failover:
description:
- - If V(true), add or change the failover. This is applicable only for A records.
+ - If V(true), add or change the failover. This is applicable only for A records.
type: bool
default: false
@@ -192,20 +188,19 @@ options:
type: str
notes:
- - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
- seconds of actual time by using NTP.
- - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
- These values can be be registered and used in your playbooks.
- - Only A records can have a monitor or failover.
- - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
- - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
-
-requirements: [ hashlib, hmac ]
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure
+ you are within a few seconds of actual time by using NTP.
+ - This module returns record(s) and monitor(s) in the RV(ignore:result) element when O(state=present). These values can
+    be registered and used in your playbooks.
+ - Only A records can have a O(monitor) or O(failover).
+ - To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required.
+ - To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required.
+ - The options O(monitor) and O(failover) share O(port), O(protocol), and O(ip1) options.
+requirements: [hashlib, hmac]
author: "Brice Burgess (@briceburg)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Fetch my.com domain records
community.general.dnsmadeeasy:
account_key: key
@@ -291,8 +286,8 @@ EXAMPLES = '''
record_value: 127.0.0.1
monitor: true
ip1: 127.0.0.2
- protocol: HTTP # default
- port: 80 # default
+ protocol: HTTP # default
+ port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: my contact list
@@ -308,11 +303,11 @@ EXAMPLES = '''
record_value: 127.0.0.1
monitor: true
ip1: 127.0.0.2
- protocol: HTTP # default
- port: 80 # default
+ protocol: HTTP # default
+ port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
- contactList: 1174 # contact list id
+ contactList: 1174 # contact list id
httpFqdn: http://my.com
httpFile: example
httpQueryString: some string
@@ -357,7 +352,7 @@ EXAMPLES = '''
record_type: A
record_value: 127.0.0.1
monitor: false
-'''
+"""
# ============================================
# DNSMadeEasy module specific support methods.
@@ -491,7 +486,7 @@ class DME2(object):
return self.query(self.record_url, 'GET')['data']
def _instMap(self, type):
- # @TODO cache this call so it's executed only once per ansible execution
+ # @TODO cache this call so it is executed only once per ansible execution
map = {}
results = {}
@@ -558,28 +553,28 @@ def main():
domain=dict(required=True),
sandbox=dict(default=False, type='bool'),
state=dict(required=True, choices=['present', 'absent']),
- record_name=dict(required=False),
- record_type=dict(required=False, choices=[
+ record_name=dict(),
+ record_type=dict(choices=[
'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
- record_value=dict(required=False),
- record_ttl=dict(required=False, default=1800, type='int'),
+ record_value=dict(),
+ record_ttl=dict(default=1800, type='int'),
monitor=dict(default=False, type='bool'),
systemDescription=dict(default=''),
maxEmails=dict(default=1, type='int'),
protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
port=dict(default=80, type='int'),
sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
- contactList=dict(default=None),
- httpFqdn=dict(required=False),
- httpFile=dict(required=False),
- httpQueryString=dict(required=False),
+ contactList=dict(),
+ httpFqdn=dict(),
+ httpFile=dict(),
+ httpQueryString=dict(),
failover=dict(default=False, type='bool'),
autoFailover=dict(default=False, type='bool'),
- ip1=dict(required=False),
- ip2=dict(required=False),
- ip3=dict(required=False),
- ip4=dict(required=False),
- ip5=dict(required=False),
+ ip1=dict(),
+ ip2=dict(),
+ ip3=dict(),
+ ip4=dict(),
+ ip5=dict(),
validate_certs=dict(default=True, type='bool'),
),
required_together=[
diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py
index 5f0d924fe2..6ef1f394e4 100644
--- a/plugins/modules/dpkg_divert.py
+++ b/plugins/modules/dpkg_divert.py
@@ -9,24 +9,20 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: dpkg_divert
short_description: Override a debian package's version of a file
version_added: '0.2.0'
author:
- quidame (@quidame)
description:
- - A diversion is for C(dpkg) the knowledge that only a given package
- (or the local administrator) is allowed to install a file at a given
- location. Other packages shipping their own version of this file will
- be forced to O(divert) it, that is to install it at another location. It
- allows one to keep changes in a file provided by a debian package by
- preventing its overwrite at package upgrade.
- - This module manages diversions of debian packages files using the
- C(dpkg-divert) commandline tool. It can either create or remove a
- diversion for a given file, but also update an existing diversion
- to modify its O(holder) and/or its O(divert) location.
+ - A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install
+ a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is
+ to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
+ it being overwritten on package upgrade.
+ - This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool. It can either create
+ or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert)
+ location.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -37,60 +33,52 @@ attributes:
options:
path:
description:
- - The original and absolute path of the file to be diverted or
- undiverted. This path is unique, i.e. it is not possible to get
- two diversions for the same O(path).
+ - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is
+ not possible to get two diversions for the same O(path).
required: true
type: path
state:
description:
- - When O(state=absent), remove the diversion of the specified
- O(path); when O(state=present), create the diversion if it does
- not exist, or update its package O(holder) or O(divert) location,
- if it already exists.
+ - When O(state=absent), remove the diversion of the specified O(path); when O(state=present), create the diversion if
+ it does not exist, or update its package O(holder) or O(divert) location, if it already exists.
type: str
default: present
choices: [absent, present]
holder:
description:
- - The name of the package whose copy of file is not diverted, also
- known as the diversion holder or the package the diversion belongs
- to.
- - The actual package does not have to be installed or even to exist
- for its name to be valid. If not specified, the diversion is hold
- by 'LOCAL', that is reserved by/for dpkg for local diversions.
+ - The name of the package whose copy of file is not diverted, also known as the diversion holder or the package the
+ diversion belongs to.
+ - The actual package does not have to be installed or even to exist for its name to be valid. If not specified, the
+        diversion is held by 'LOCAL', which is reserved by/for dpkg for local diversions.
- This parameter is ignored when O(state=absent).
type: str
divert:
description:
- - The location where the versions of file will be diverted.
+ - The location where the versions of file are diverted.
- Default is to add suffix C(.distrib) to the file path.
- This parameter is ignored when O(state=absent).
type: path
rename:
description:
- - Actually move the file aside (when O(state=present)) or back (when
- O(state=absent)), but only when changing the state of the diversion.
- This parameter has no effect when attempting to add a diversion that
- already exists or when removing an unexisting one.
- - Unless O(force=true), renaming fails if the destination file already
- exists (this lock being a dpkg-divert feature, and bypassing it being
- a module feature).
+ - Actually move the file aside (when O(state=present)) or back (when O(state=absent)), but only when changing the state
+ of the diversion. This parameter has no effect when attempting to add a diversion that already exists or when removing
+        a nonexistent one.
+ - Unless O(force=true), renaming fails if the destination file already exists (this lock being a dpkg-divert feature,
+ and bypassing it being a module feature).
type: bool
default: false
force:
description:
- - When O(rename=true) and O(force=true), renaming is performed even if
- the target of the renaming exists, i.e. the existing contents of the
- file at this location will be lost.
+ - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words
+ the existing contents of the file at this location are lost.
- This parameter is ignored when O(rename=false).
type: bool
default: false
requirements:
- dpkg-divert >= 1.15.0 (Debian family)
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
community.general.dpkg_divert:
path: /usr/bin/busybox
@@ -112,9 +100,9 @@ EXAMPLES = r'''
state: absent
rename: true
force: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
commands:
description: The dpkg-divert commands ran internally by the module.
type: list
@@ -151,7 +139,7 @@ diversion:
"path": "/etc/foobarrc",
"state": "present"
}
-'''
+"""
import re
@@ -178,11 +166,11 @@ def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, type='path'),
- state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
- holder=dict(required=False, type='str'),
- divert=dict(required=False, type='path'),
- rename=dict(required=False, type='bool', default=False),
- force=dict(required=False, type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ holder=dict(type='str'),
+ divert=dict(type='path'),
+ rename=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py
index 2e8fc2f4f0..8d0a39333e 100644
--- a/plugins/modules/easy_install.py
+++ b/plugins/modules/easy_install.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: easy_install
short_description: Installs Python libraries
description:
- - Installs Python libraries, optionally in a C(virtualenv)
+ - Installs Python libraries, optionally in a C(virtualenv).
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -31,31 +30,25 @@ options:
virtualenv:
type: str
description:
- - An optional O(virtualenv) directory path to install into. If the
- O(virtualenv) does not exist, it is created automatically.
+ - An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically.
virtualenv_site_packages:
description:
- - Whether the virtual environment will inherit packages from the
- global site-packages directory. Note that if this setting is
- changed on an already existing virtual environment it will not
- have any effect, the environment must be deleted and newly
- created.
+ - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting
+ has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted
+ and newly created.
type: bool
default: false
virtualenv_command:
type: str
description:
- - The command to create the virtual environment with. For example
- V(pyvenv), V(virtualenv), V(virtualenv2).
+ - The command to create the virtual environment with. For example V(pyvenv), V(virtualenv), V(virtualenv2).
default: virtualenv
executable:
type: str
description:
- - The explicit executable or a pathname to the executable to be used to
- run easy_install for a specific version of Python installed in the
- system. For example V(easy_install-3.3), if there are both Python 2.7
- and 3.3 installations in the system and you want to run easy_install
- for the Python 3.3 installation.
+ - The explicit executable or a pathname to the executable to be used to run easy_install for a specific version of Python
+ installed in the system. For example V(easy_install-3.3), if there are both Python 2.7 and 3.3 installations in the
+ system and you want to run easy_install for the Python 3.3 installation.
default: easy_install
state:
type: str
@@ -64,17 +57,14 @@ options:
choices: [present, latest]
default: present
notes:
- - Please note that the C(easy_install) module can only install Python
- libraries. Thus this module is not able to remove libraries. It is
- generally recommended to use the M(ansible.builtin.pip) module which you can first install
- using M(community.general.easy_install).
- - Also note that C(virtualenv) must be installed on the remote host if the
- O(virtualenv) parameter is specified.
-requirements: [ "virtualenv" ]
+ - Please note that the C(easy_install) module can only install Python libraries. Thus this module is not able to remove
+ libraries. It is generally recommended to use the M(ansible.builtin.pip) module which you can first install using M(community.general.easy_install).
+ - Also note that C(virtualenv) must be installed on the remote host if the O(virtualenv) parameter is specified.
+requirements: ["virtualenv"]
author: "Matt Wright (@mattupstate)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install or update pip
community.general.easy_install:
name: pip
@@ -84,7 +74,13 @@ EXAMPLES = '''
community.general.easy_install:
name: bottle
virtualenv: /webapps/myapp/venv
-'''
+
+- name: Install a python package using pyvenv as the virtualenv tool
+ community.general.easy_install:
+ name: package_name
+ virtualenv: /opt/myenv
+ virtualenv_command: pyvenv
+"""
import os
import os.path
@@ -137,14 +133,13 @@ def _get_easy_install(module, env=None, executable=None):
def main():
arg_spec = dict(
name=dict(required=True),
- state=dict(required=False,
- default='present',
+ state=dict(default='present',
choices=['present', 'latest'],
type='str'),
- virtualenv=dict(default=None, required=False),
+ virtualenv=dict(),
virtualenv_site_packages=dict(default=False, type='bool'),
- virtualenv_command=dict(default='virtualenv', required=False),
- executable=dict(default='easy_install', required=False),
+ virtualenv_command=dict(default='virtualenv'),
+ executable=dict(default='easy_install'),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py
index d0b575e1cd..f93612a516 100644
--- a/plugins/modules/ejabberd_user.py
+++ b/plugins/modules/ejabberd_user.py
@@ -9,57 +9,50 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ejabberd_user
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
- - ejabberd with mod_admin_extra
+ - ejabberd with mod_admin_extra
description:
- - This module provides user management for ejabberd servers
+ - This module provides user management for ejabberd servers.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- username:
- type: str
- description:
- - the name of the user to manage
- required: true
- host:
- type: str
- description:
- - the ejabberd host associated with this username
- required: true
- password:
- type: str
- description:
- - the password to assign to the username
- required: false
- logging:
- description:
- - enables or disables the local syslog facility for this module
- required: false
- default: false
- type: bool
- state:
- type: str
- description:
- - describe the desired state of the user to be managed
- required: false
- default: 'present'
- choices: [ 'present', 'absent' ]
+ username:
+ type: str
+ description:
+ - The name of the user to manage.
+ required: true
+ host:
+ type: str
+ description:
+ - The ejabberd host associated with this username.
+ required: true
+ password:
+ type: str
+ description:
+ - The password to assign to the username.
+ required: false
+ state:
+ type: str
+ description:
+ - Describe the desired state of the user to be managed.
+ required: false
+ default: 'present'
+ choices: ['present', 'absent']
notes:
- - Password parameter is required for state == present only
- - Passwords must be stored in clear text for this release
- - The ejabberd configuration file must include mod_admin_extra as a module.
-'''
-EXAMPLES = '''
+ - Password parameter is required for O(state=present) only.
+ - Passwords must be stored in clear text for this release.
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+"""
+EXAMPLES = r"""
# Example playbook entries using the ejabberd_user module to manage users state.
- name: Create a user if it does not exist
@@ -73,9 +66,7 @@ EXAMPLES = '''
username: test
host: server
state: absent
-'''
-
-import syslog
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
@@ -91,7 +82,6 @@ class EjabberdUser(object):
def __init__(self, module):
self.module = module
- self.logging = module.params.get('logging')
self.state = module.params.get('state')
self.host = module.params.get('host')
self.user = module.params.get('username')
@@ -125,10 +115,8 @@ class EjabberdUser(object):
return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc)))
def log(self, entry):
- """ This method will log information to the local syslog facility """
- if self.logging:
- syslog.openlog('ansible-%s' % self.module._name)
- syslog.syslog(syslog.LOG_NOTICE, entry)
+ """ This method does nothing """
+ pass
def run_command(self, cmd, options, process=None):
""" This method will run the any command specified and return the
@@ -169,7 +157,6 @@ def main():
username=dict(required=True, type='str'),
password=dict(type='str', no_log=True),
state=dict(default='present', choices=['present', 'absent']),
- logging=dict(default=False, type='bool', removed_in_version='10.0.0', removed_from_collection='community.general'),
),
required_if=[
('state', 'present', ['password']),
diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py
index 9264bb6471..8552b55ccd 100644
--- a/plugins/modules/elasticsearch_plugin.py
+++ b/plugins/modules/elasticsearch_plugin.py
@@ -9,88 +9,85 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
- - Manages Elasticsearch plugins.
+ - Manages Elasticsearch plugins.
author:
- - Mathew Davies (@ThePixelDeveloper)
- - Sam Doran (@samdoran)
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of the plugin to install.
- required: true
- type: str
- state:
- description:
- - Desired state of a plugin.
- choices: ["present", "absent"]
- default: present
- type: str
- src:
- description:
- - Optionally set the source location to retrieve the plugin from. This can be a file://
- URL to install from a local file, or a remote URL. If this is not set, the plugin
- location is just based on the name.
- - The name parameter must match the descriptor in the plugin ZIP specified.
- - Is only used if the state would change, which is solely checked based on the name
- parameter. If, for example, the plugin is already installed, changing this has no
- effect.
- - For ES 1.x use url.
- required: false
- type: str
- url:
- description:
- - Set exact URL to download the plugin from (Only works for ES 1.x).
- - For ES 2.x and higher, use src.
- required: false
- type: str
- timeout:
- description:
- - "Timeout setting: 30s, 1m, 1h..."
- - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
- default: 1m
- type: str
- force:
- description:
- - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
- default: false
- type: bool
- plugin_bin:
- description:
- - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
- type: path
- plugin_dir:
- description:
- - Your configured plugin directory specified in Elasticsearch
- default: /usr/share/elasticsearch/plugins/
- type: path
- proxy_host:
- description:
- - Proxy host to use during plugin installation
- type: str
- proxy_port:
- description:
- - Proxy port to use during plugin installation
- type: str
- version:
- description:
- - Version of the plugin to be installed.
- If plugin exists with previous version, it will NOT be updated
- type: str
-'''
+ name:
+ description:
+ - Name of the plugin to install.
+ required: true
+ type: str
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a C(file://) URL to install from a local
+ file, or a remote URL. If this is not set, the plugin location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+ - Is only used if the state would change, which is solely checked based on the name parameter. If, for example, the
+ plugin is already installed, changing this has no effect.
+ - For ES 1.x use O(url).
+ required: false
+ type: str
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x).
+      - For ES 2.x and higher, use O(src).
+ required: false
+ type: str
+ timeout:
+ description:
+ - 'Timeout setting: V(30s), V(1m), V(1h)...'
+ - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
+ default: 1m
+ type: str
+ force:
+ description:
+ - Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console
+ detection fails.
+ default: false
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries are used.
+ type: path
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch.
+ default: /usr/share/elasticsearch/plugins/
+ type: path
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation.
+ type: str
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation.
+ type: str
+ version:
+ description:
+ - Version of the plugin to be installed. If plugin exists with previous version, it is NOT updated.
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
community.general.elasticsearch_plugin:
name: mobz/elasticsearch-head
@@ -116,7 +113,7 @@ EXAMPLES = '''
name: ingest-geoip
state: present
force: true
-'''
+"""
import os
@@ -262,15 +259,15 @@ def main():
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
- src=dict(default=None),
- url=dict(default=None),
+ src=dict(),
+ url=dict(),
timeout=dict(default="1m"),
force=dict(type='bool', default=False),
plugin_bin=dict(type="path"),
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
- proxy_host=dict(default=None),
- proxy_port=dict(default=None),
- version=dict(default=None)
+ proxy_host=dict(),
+ proxy_port=dict(),
+ version=dict()
),
mutually_exclusive=[("src", "url")],
supports_check_mode=True
diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py
index b06cd01de3..a0b1e920e2 100644
--- a/plugins/modules/emc_vnx_sg_member.py
+++ b/plugins/modules/emc_vnx_sg_member.py
@@ -12,52 +12,50 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: emc_vnx_sg_member
short_description: Manage storage group member on EMC VNX
description:
- - "This module manages the members of an existing storage group."
-
+ - This module manages the members of an existing storage group.
extends_documentation_fragment:
- - community.general.emc.emc_vnx
- - community.general.attributes
+ - community.general.emc.emc_vnx
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of the Storage group to manage.
- required: true
- type: str
- lunid:
- description:
- - Lun id to be added.
- required: true
- type: int
- state:
- description:
- - Indicates the desired lunid state.
- - V(present) ensures specified lunid is present in the Storage Group.
- - V(absent) ensures specified lunid is absent from Storage Group.
- default: present
- choices: [ "present", "absent"]
- type: str
+ name:
+ description:
+ - Name of the Storage group to manage.
+ required: true
+ type: str
+ lunid:
+ description:
+ - LUN ID to be added.
+ required: true
+ type: int
+ state:
+ description:
+ - Indicates the desired lunid state.
+ - V(present) ensures specified O(lunid) is present in the Storage Group.
+ - V(absent) ensures specified O(lunid) is absent from Storage Group.
+ default: present
+ choices: ["present", "absent"]
+ type: str
author:
- - Luca 'remix_tj' Lorenzetto (@remixtj)
-'''
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add lun to storage group
community.general.emc_vnx_sg_member:
name: sg01
@@ -75,14 +73,14 @@ EXAMPLES = '''
sp_password: sysadmin
lunid: 100
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
hluid:
- description: LUNID that hosts attached to the storage group will see.
- type: int
- returned: success
-'''
+ description: LUNID visible to hosts attached to the storage group.
+ type: int
+ returned: success
+"""
import traceback
diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py
index b1bb181cf4..ce3231d8e0 100644
--- a/plugins/modules/etcd3.py
+++ b/plugins/modules/etcd3.py
@@ -9,84 +9,83 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: etcd3
short_description: Set or delete key value pairs from an etcd3 cluster
requirements:
- etcd3
description:
- - Sets or deletes values in etcd3 cluster using its v3 api.
- - Needs python etcd3 lib to work
+ - Sets or deletes values in an etcd3 cluster using its v3 API.
+ - Needs the C(etcd3) Python library to work.
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- key:
- type: str
- description:
- - the key where the information is stored in the cluster
- required: true
- value:
- type: str
- description:
- - the information stored
- required: true
- host:
- type: str
- description:
- - the IP address of the cluster
- default: 'localhost'
- port:
- type: int
- description:
- - the port number used to connect to the cluster
- default: 2379
- state:
- type: str
- description:
- - the state of the value for the key.
- - can be present or absent
- required: true
- choices: [ present, absent ]
- user:
- type: str
- description:
- - The etcd user to authenticate with.
- password:
- type: str
- description:
- - The password to use for authentication.
- - Required if O(user) is defined.
- ca_cert:
- type: path
- description:
- - The Certificate Authority to use to verify the etcd host.
- - Required if O(client_cert) and O(client_key) are defined.
- client_cert:
- type: path
- description:
- - PEM formatted certificate chain file to be used for SSL client authentication.
- - Required if O(client_key) is defined.
- client_key:
- type: path
- description:
- - PEM formatted file that contains your private key to be used for SSL client authentication.
- - Required if O(client_cert) is defined.
- timeout:
- type: int
- description:
- - The socket level timeout in seconds.
+ key:
+ type: str
+ description:
+ - The key where the information is stored in the cluster.
+ required: true
+ value:
+ type: str
+ description:
+ - The information stored.
+ required: true
+ host:
+ type: str
+ description:
+ - The IP address of the cluster.
+ default: 'localhost'
+ port:
+ type: int
+ description:
+ - The port number used to connect to the cluster.
+ default: 2379
+ state:
+ type: str
+ description:
+ - The state of the value for the key.
+ - Can be V(present) or V(absent).
+ required: true
+ choices: [present, absent]
+ user:
+ type: str
+ description:
+ - The etcd user to authenticate with.
+ password:
+ type: str
+ description:
+ - The password to use for authentication.
+ - Required if O(user) is defined.
+ ca_cert:
+ type: path
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if O(client_cert) and O(client_key) are defined.
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if O(client_key) is defined.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if O(client_cert) is defined.
+ timeout:
+ type: int
+ description:
+ - The socket level timeout in seconds.
author:
- - Jean-Philippe Evrard (@evrardjp)
- - Victor Fauth (@vfauth)
-'''
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
community.general.etcd3:
key: "foo"
@@ -114,16 +113,16 @@ EXAMPLES = """
client_key: "/etc/ssl/private/key.pem"
"""
-RETURN = '''
+RETURN = r"""
key:
- description: The key that was queried
- returned: always
- type: str
+ description: The key that was queried.
+ returned: always
+ type: str
old_value:
- description: The previous value in the cluster
- returned: always
- type: str
-'''
+ description: The previous value in the cluster.
+ returned: always
+ type: str
+"""
import traceback
diff --git a/plugins/modules/facter.py b/plugins/modules/facter.py
index 87017246ae..20be3d4a4d 100644
--- a/plugins/modules/facter.py
+++ b/plugins/modules/facter.py
@@ -8,36 +8,38 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: facter
short_description: Runs the discovery program C(facter) on the remote system
description:
- - Runs the C(facter) discovery program
- (U(https://github.com/puppetlabs/facter)) on the remote system, returning
- JSON data that can be useful for inventory purposes.
+ - Runs the C(facter) discovery program (U(https://github.com/puppetlabs/facter)) on the remote system, returning JSON data
+ that can be useful for inventory purposes.
+deprecated:
+ removed_in: 12.0.0
+ why: The module has been replaced by M(community.general.facter_facts).
+ alternative: Use M(community.general.facter_facts) instead.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- arguments:
- description:
- - Specifies arguments for facter.
- type: list
- elements: str
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
requirements:
- - facter
- - ruby-json
+ - facter
+ - ruby-json
author:
- - Ansible Core Team
- - Michael DeHaan
-'''
+ - Ansible Core Team
+ - Michael DeHaan
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Example command-line invocation
# ansible www.example.net -m facter
@@ -47,11 +49,11 @@ EXAMPLES = '''
- name: Execute facter with arguments
community.general.facter:
arguments:
- - -p
- - system_uptime
- - timezone
- - is_virtual
-'''
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+"""
import json
from ansible.module_utils.basic import AnsibleModule
@@ -60,7 +62,7 @@ from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
- arguments=dict(required=False, type='list', elements='str')
+ arguments=dict(type='list', elements='str')
)
)
diff --git a/plugins/modules/facter_facts.py b/plugins/modules/facter_facts.py
index abc3f87ebe..8f73b37644 100644
--- a/plugins/modules/facter_facts.py
+++ b/plugins/modules/facter_facts.py
@@ -9,47 +9,45 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: facter_facts
short_description: Runs the discovery program C(facter) on the remote system and return Ansible facts
version_added: 8.0.0
description:
- - Runs the C(facter) discovery program
- (U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible facts from the
- JSON data that can be useful for inventory purposes.
+ - Runs the C(facter) discovery program (U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible
+ facts from the JSON data that can be useful for inventory purposes.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.facts
- - community.general.attributes.facts_module
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
options:
- arguments:
- description:
- - Specifies arguments for facter.
- type: list
- elements: str
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
requirements:
- - facter
- - ruby-json
+ - facter
+ - ruby-json
author:
- - Ansible Core Team
- - Michael DeHaan
-'''
+ - Ansible Core Team
+ - Michael DeHaan
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Execute facter no arguments
community.general.facter_facts:
- name: Execute facter with arguments
community.general.facter_facts:
arguments:
- - -p
- - system_uptime
- - timezone
- - is_virtual
-'''
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+"""
-RETURN = r'''
+RETURN = r"""
ansible_facts:
description: Dictionary with one key C(facter).
returned: always
@@ -59,7 +57,7 @@ ansible_facts:
description: Dictionary containing facts discovered in the remote system.
returned: always
type: dict
-'''
+"""
import json
diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py
index 83de682883..777c00711f 100644
--- a/plugins/modules/filesize.py
+++ b/plugins/modules/filesize.py
@@ -9,17 +9,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: filesize
short_description: Create a file with a given size, or resize it if it exists
description:
- - This module is a simple wrapper around C(dd) to create, extend or truncate
- a file, given its size. It can be used to manage swap files (that require
- contiguous blocks) or alternatively, huge sparse files.
-
+ - This module is a simple wrapper around C(dd) to create, extend or truncate a file, given its size. It can be used to manage
+ swap files (that require contiguous blocks) or alternatively, huge sparse files.
author:
- quidame (@quidame)
@@ -40,36 +37,27 @@ options:
size:
description:
- Requested size of the file.
- - The value is a number (either C(int) or C(float)) optionally followed
- by a multiplicative suffix, that can be one of V(B) (bytes), V(KB) or
- V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB),
- and so on for V(T), V(P), V(E), V(Z) and V(Y); or alternatively one of
- V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB);
+ - The value is a number (either C(int) or C(float)) optionally followed by a multiplicative suffix, that can be one
+ of V(B) (bytes), V(KB) or V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB), and so on for V(T),
+ V(P), V(E), V(Z) and V(Y); or alternatively one of V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB);
V(G), V(g) or V(GiB) (= 1024MiB); and so on.
- - If the multiplicative suffix is not provided, the value is treated as
- an integer number of blocks of O(blocksize) bytes each (float values
- are rounded to the closest integer).
+ - If the multiplicative suffix is not provided, the value is treated as an integer number of blocks of O(blocksize)
+ bytes each (float values are rounded to the closest integer).
- When the O(size) value is equal to the current file size, does nothing.
- - When the O(size) value is bigger than the current file size, bytes from
- O(source) (if O(sparse) is not V(false)) are appended to the file
- without truncating it, in other words, without modifying the existing
- bytes of the file.
- - When the O(size) value is smaller than the current file size, it is
- truncated to the requested value without modifying bytes before this
- value.
- - That means that a file of any arbitrary size can be grown to any other
- arbitrary size, and then resized down to its initial size without
- modifying its initial content.
+ - When the O(size) value is bigger than the current file size, bytes from O(source) (if O(sparse) is not V(false)) are
+ appended to the file without truncating it, in other words, without modifying the existing bytes of the file.
+ - When the O(size) value is smaller than the current file size, it is truncated to the requested value without modifying
+ bytes before this value.
+ - That means that a file of any arbitrary size can be grown to any other arbitrary size, and then resized down to its
+ initial size without modifying its initial content.
type: raw
required: true
blocksize:
description:
- Size of blocks, in bytes if not followed by a multiplicative suffix.
- - The numeric value (before the unit) B(MUST) be an integer (or a C(float)
- if it equals an integer).
- - If not set, the size of blocks is guessed from the OS and commonly
- results in V(512) or V(4096) bytes, that is used internally by the
- module or when O(size) has no unit.
+ - The numeric value (before the unit) B(MUST) be an integer (or a C(float) if it equals an integer).
+ - If not set, the size of blocks is guessed from the OS and commonly results in V(512) or V(4096) bytes, that is used
+ internally by the module or when O(size) has no unit.
type: raw
source:
description:
@@ -79,26 +67,22 @@ options:
default: /dev/zero
force:
description:
- - Whether or not to overwrite the file if it exists, in other words, to
- truncate it from 0. When V(true), the module is not idempotent, that
- means it always reports C(changed=true).
+ - Whether or not to overwrite the file if it exists, in other words, to truncate it from 0. When V(true), the module
+ is not idempotent, that means it always reports C(changed=true).
- O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
sparse:
description:
- Whether or not the file to create should be a sparse file.
- - This option is effective only on newly created files, or when growing a
- file, only for the bytes to append.
+ - This option is effective only on newly created files, or when growing a file, only for the bytes to append.
- This option is not supported on OSes or filesystems not supporting sparse files.
- O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
unsafe_writes:
description:
- - This option is silently ignored. This module always modifies file
- size in-place.
-
+ - This option is silently ignored. This module always modifies file size in-place.
requirements:
- dd (Data Duplicator) in PATH
@@ -138,9 +122,9 @@ seealso:
- name: busybox(1) manpage for Linux
description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation.
link: https://www.unix.com/man-page/linux/1/busybox
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a file of 1G filled with null bytes
community.general.filesize:
path: /var/bigfile
@@ -183,9 +167,9 @@ EXAMPLES = r'''
mode: u=rw,go=
owner: root
group: root
-'''
+"""
-RETURN = r'''
+RETURN = r"""
cmd:
description: Command executed to create or resize the file.
type: str
@@ -229,7 +213,7 @@ path:
type: str
sample: /var/swap0
returned: always
-'''
+"""
import re
diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py
index 73e8c79c6a..f14458c337 100644
--- a/plugins/modules/filesystem.py
+++ b/plugins/modules/filesystem.py
@@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author:
- Alexander Bulimov (@abulimov)
- quidame (@quidame)
@@ -29,33 +28,29 @@ attributes:
options:
state:
description:
- - If O(state=present), the filesystem is created if it doesn't already
- exist, that is the default behaviour if O(state) is omitted.
- - If O(state=absent), filesystem signatures on O(dev) are wiped if it
- contains a filesystem (as known by C(blkid)).
- - When O(state=absent), all other options but O(dev) are ignored, and the
- module does not fail if the device O(dev) doesn't actually exist.
+ - If O(state=present), the filesystem is created if it does not already exist, that is the default behaviour if O(state)
+ is omitted.
+ - If O(state=absent), filesystem signatures on O(dev) are wiped if it contains a filesystem (as known by C(blkid)).
+ - When O(state=absent), all other options but O(dev) are ignored, and the module does not fail if the device O(dev)
+ does not actually exist.
type: str
- choices: [ present, absent ]
+ choices: [present, absent]
default: present
version_added: 1.3.0
fstype:
- choices: [ bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ]
+ choices: [bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs]
description:
- - Filesystem type to be created. This option is required with
- O(state=present) (or if O(state) is omitted).
- - ufs support has been added in community.general 3.4.0.
- - bcachefs support has been added in community.general 8.6.0.
+ - Filesystem type to be created. This option is required with O(state=present) (or if O(state) is omitted).
+ - UFS support has been added in community.general 3.4.0.
+ - Bcachefs support has been added in community.general 8.6.0.
type: str
aliases: [type]
dev:
description:
- - Target path to block device (Linux) or character device (FreeBSD) or
- regular file (both).
- - When setting Linux-specific filesystem types on FreeBSD, this module
- only works when applying to regular files, aka disk images.
- - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support
- a regular file as their target O(dev).
+ - Target path to block device (Linux) or character device (FreeBSD) or regular file (both).
+ - When setting Linux-specific filesystem types on FreeBSD, this module only works when applying to regular files, aka
+ disk images.
+ - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support a regular file as their target O(dev).
- Support for character devices on FreeBSD has been added in community.general 3.4.0.
type: path
required: true
@@ -68,12 +63,11 @@ options:
resizefs:
description:
- If V(true), if the block device and filesystem size differ, grow the filesystem into the space.
- - Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems.
- Attempts to resize other filesystem types will fail.
- - XFS Will only grow if mounted. Currently, the module is based on commands
- from C(util-linux) package to perform operations, so resizing of XFS is
- not supported on FreeBSD systems.
- - vFAT will likely fail if C(fatresize < 1.04).
+ - Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat)
+ filesystems. Attempts to resize other filesystem types fail.
+ - XFS only grows if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations,
+ so resizing of XFS is not supported on FreeBSD systems.
+ - VFAT is likely to fail if C(fatresize < 1.04).
- Mutually exclusive with O(uuid).
type: bool
default: false
@@ -88,37 +82,33 @@ options:
- See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values.
- For O(fstype=lvm) the value is ignored, it resets the PV UUID if set.
- Supported for O(fstype) being one of C(bcachefs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs).
- - This is B(not idempotent). Specifying this option will always result in a change.
+ - This is B(not idempotent). Specifying this option always results in a change.
- Mutually exclusive with O(resizefs).
type: str
version_added: 7.1.0
requirements:
- - Uses specific tools related to the O(fstype) for creating or resizing a
- filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on).
- - Uses generic tools mostly related to the Operating System (Linux or
- FreeBSD) or available on both, as C(blkid).
+ - Uses specific tools related to the O(fstype) for creating or resizing a filesystem (from packages e2fsprogs, xfsprogs,
+ dosfstools, and so on).
+ - Uses generic tools mostly related to the Operating System (Linux or FreeBSD) or available on both, as C(blkid).
- On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required.
notes:
- - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid)
- is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also
- unable to detect a filesystem), this filesystem is overwritten even if
- O(force) is V(false).
- - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide
- a C(blkid) command that is compatible with this module. However, these
- packages conflict with each other, and only the C(util-linux) package
- provides the command required to not fail when O(state=absent).
+ - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid) is unable to detect a filesystem (and in
+ case C(fstyp) on FreeBSD is also unable to detect a filesystem), this filesystem is overwritten even if O(force) is V(false).
+ - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide a C(blkid) command that is compatible with this
+ module. However, these packages conflict with each other, and only the C(util-linux) package provides the command required
+ to not fail when O(state=absent).
seealso:
- module: community.general.filesize
- module: ansible.posix.mount
- name: xfs_admin(8) manpage for Linux
- description: Manual page of the GNU/Linux's xfs_admin implementation
+ description: Manual page of the GNU/Linux's xfs_admin implementation.
link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html
- name: tune2fs(8) manpage for Linux
- description: Manual page of the GNU/Linux's tune2fs implementation
+ description: Manual page of the GNU/Linux's tune2fs implementation.
link: https://man7.org/linux/man-pages/man8/tune2fs.8.html
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a ext2 filesystem on /dev/sdb1
community.general.filesystem:
fstype: ext2
@@ -157,7 +147,7 @@ EXAMPLES = '''
fstype: lvm
dev: /dev/sdc
uuid: random
-'''
+"""
import os
import platform
@@ -643,7 +633,7 @@ def main():
opts=dict(type='str'),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
- uuid=dict(type='str', required=False),
+ uuid=dict(type='str'),
),
required_if=[
('state', 'present', ['fstype'])
diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py
index 84e4ea8374..98de9de3ed 100644
--- a/plugins/modules/flatpak.py
+++ b/plugins/modules/flatpak.py
@@ -10,8 +10,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: flatpak
short_description: Manage flatpaks
description:
@@ -28,72 +27,66 @@ attributes:
check_mode:
support: partial
details:
- - If O(state=latest), the module will always return C(changed=true).
+ - If O(state=latest), the module always returns RV(ignore:changed=true).
diff_mode:
support: none
options:
executable:
description:
- - The path to the C(flatpak) executable to use.
- - By default, this module looks for the C(flatpak) executable on the path.
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
type: path
default: flatpak
method:
description:
- - The installation method to use.
- - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
- or only for the current V(user).
+ - The installation method to use.
+ - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user).
type: str
- choices: [ system, user ]
+ choices: [system, user]
default: system
name:
description:
- - The name of the flatpak to manage. To operate on several packages this
- can accept a list of packages.
- - When used with O(state=present), O(name) can be specified as a URL to a
- C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
- - Both C(https://) and C(http://) URLs are supported.
- - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote
- to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
- - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in
- the reverse DNS format.
- - When supplying a URL with O(state=absent) or O(state=latest), the module will try to match the
- installed flatpak based on the name of the flatpakref to remove or update it. However, there
- is no guarantee that the names of the flatpakref file and the reverse DNS name of the
- installed flatpak do match.
+ - The name of the flatpak to manage. To operate on several packages this can accept a list of packages.
+ - When used with O(state=present), O(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS
+ name that identifies a flatpak.
+ - Both C(https://) and C(http://) URLs are supported.
+ - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak.
+ An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format.
+ - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based
+ on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref
+ file and the reverse DNS name of the installed flatpak do match.
type: list
elements: str
required: true
no_dependencies:
description:
- - If installing runtime dependencies should be omitted or not
- - This parameter is primarily implemented for integration testing this module.
- There might however be some use cases where you would want to have this, like when you are
- packaging your own flatpaks.
+ - If installing runtime dependencies should be omitted or not.
+ - This parameter is primarily implemented for integration testing this module. There might however be some use cases
+ where you would want to have this, like when you are packaging your own flatpaks.
type: bool
default: false
version_added: 3.2.0
remote:
description:
- - The flatpak remote (repository) to install the flatpak from.
- - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before
- you can use this.
- - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ - The flatpak remote (repository) to install the flatpak from.
+ - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before you can use this.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
type: str
default: flathub
state:
description:
- - Indicates the desired package state.
- - The value V(latest) is supported since community.general 8.6.0.
- choices: [ absent, present, latest ]
+ - Indicates the desired package state.
+ - The value V(latest) is supported since community.general 8.6.0.
+ choices: [absent, present, latest]
type: str
default: present
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Install the spotify flatpak
community.general.flatpak:
- name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
state: present
- name: Install the gedit flatpak package without dependencies (not recommended)
@@ -114,6 +107,12 @@ EXAMPLES = r'''
state: present
remote: gnome
+- name: Install GIMP using custom flatpak binary path
+ community.general.flatpak:
+ name: org.gimp.GIMP
+ state: present
+ executable: /usr/local/bin/flatpak-dev
+
- name: Install multiple packages
community.general.flatpak:
name:
@@ -123,7 +122,7 @@ EXAMPLES = r'''
- name: Update the spotify flatpak
community.general.flatpak:
- name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
state: latest
- name: Update the gedit flatpak package without dependencies (not recommended)
@@ -164,35 +163,15 @@ EXAMPLES = r'''
- org.inkscape.Inkscape
- org.mozilla.firefox
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
command:
- description: The exact flatpak command that was executed
+ description: The exact flatpak command that was executed.
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator"
-msg:
- description: Module error message
- returned: failure
- type: str
- sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
-rc:
- description: Return code from flatpak binary
- returned: When a flatpak command has been executed
- type: int
- sample: 0
-stderr:
- description: Error output from flatpak binary
- returned: When a flatpak command has been executed
- type: str
- sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
-stdout:
- description: Output from flatpak binary
- returned: When a flatpak command has been executed
- type: str
- sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
-'''
+"""
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py
index a4eb3ea27c..641ce930d0 100644
--- a/plugins/modules/flatpak_remote.py
+++ b/plugins/modules/flatpak_remote.py
@@ -10,16 +10,14 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: flatpak_remote
short_description: Manage flatpak repository remotes
description:
- Allows users to add or remove flatpak remotes.
- - The flatpak remotes concept is comparable to what is called repositories in other packaging
- formats.
- - Currently, remote addition is only supported via C(flatpakrepo) file URLs.
- - Existing remotes will not be updated.
+ - The flatpak remotes concept is comparable to what is called repositories in other packaging formats.
+ - Currently, remote addition is only supported using C(flatpakrepo) file URLs.
+ - Existing remotes are not updated.
- See the M(community.general.flatpak) module for managing flatpaks.
author:
- John Kwiatkoski (@JayKayy)
@@ -36,49 +34,47 @@ attributes:
options:
executable:
description:
- - The path to the C(flatpak) executable to use.
- - By default, this module looks for the C(flatpak) executable on the path.
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
type: str
default: flatpak
flatpakrepo_url:
description:
- - The URL to the C(flatpakrepo) file representing the repository remote to add.
- - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url)
- is added using the specified installation O(method).
- - When used with O(state=absent), this is not required.
- - Required when O(state=present).
+ - The URL to the C(flatpakrepo) file representing the repository remote to add.
+ - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url) is added using the specified
+ installation O(method).
+ - When used with O(state=absent), this is not required.
+ - Required when O(state=present).
type: str
method:
description:
- - The installation method to use.
- - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
- or only for the current V(user).
+ - The installation method to use.
+ - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user).
type: str
- choices: [ system, user ]
+ choices: [system, user]
default: system
name:
description:
- - The desired name for the flatpak remote to be registered under on the managed host.
- - When used with O(state=present), the remote will be added to the managed host under
- the specified O(name).
- - When used with O(state=absent) the remote with that name will be removed.
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with O(state=present), the remote is added to the managed host under the specified O(name).
+ - When used with O(state=absent) the remote with that name is removed.
type: str
required: true
state:
description:
- - Indicates the desired package state.
+ - Indicates the desired package state.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
enabled:
description:
- - Indicates whether this remote is enabled.
+ - Indicates whether this remote is enabled.
type: bool
default: true
version_added: 6.4.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Add the Gnome flatpak remote to the system installation
community.general.flatpak_remote:
name: gnome
@@ -108,35 +104,15 @@ EXAMPLES = r'''
name: flathub
state: present
enabled: false
-'''
+"""
-RETURN = r'''
+RETURN = r"""
command:
- description: The exact flatpak command that was executed
+ description: The exact flatpak command that was executed.
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
-msg:
- description: Module error message
- returned: failure
- type: str
- sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
-rc:
- description: Return code from flatpak binary
- returned: When a flatpak command has been executed
- type: int
- sample: 0
-stderr:
- description: Error output from flatpak binary
- returned: When a flatpak command has been executed
- type: str
- sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
-stdout:
- description: Output from flatpak binary
- returned: When a flatpak command has been executed
- type: str
- sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py
index 2f66448520..ef5b4381c2 100644
--- a/plugins/modules/gconftool2.py
+++ b/plugins/modules/gconftool2.py
@@ -15,8 +15,8 @@ author:
- Kenneth D. Evensen (@kevensen)
short_description: Edit GNOME Configurations
description:
- - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man pages
- for more details.
+ - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man
+ pages for more details.
seealso:
- name: C(gconftool-2) command manual page
description: Manual page for the command.
@@ -96,6 +96,12 @@ previous_value:
returned: success
type: str
sample: "Serif 12"
+version:
+ description: Version of gconftool-2.
+ type: str
+ returned: always
+ sample: "3.2.6"
+ version_added: 10.0.0
"""
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
@@ -122,13 +128,16 @@ class GConftool(StateModuleHelper):
],
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
if not self.vars.direct and self.vars.config_source is not None:
self.do_raise('If the "config_source" is specified then "direct" must be "true"')
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip()
+
self.vars.set('previous_value', self._get(), fact=True)
self.vars.set('value_type', self.vars.value_type)
self.vars.set('_value', self.vars.previous_value, output=False, change=True)
diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py
index 3c1baf9a65..3f6aa7b2e3 100644
--- a/plugins/modules/gconftool2_info.py
+++ b/plugins/modules/gconftool2_info.py
@@ -47,6 +47,12 @@ value:
returned: success
type: str
sample: Monospace 10
+version:
+ description: Version of gconftool-2.
+ type: str
+ returned: always
+ sample: "3.2.6"
+ version_added: 10.0.0
"""
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
@@ -61,10 +67,12 @@ class GConftoolInfo(ModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip()
def __run__(self):
with self.runner.context(args_order=["state", "key"]) as ctx:
diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py
index c01433cb90..1ea9c68a94 100644
--- a/plugins/modules/gem.py
+++ b/plugins/modules/gem.py
@@ -48,7 +48,7 @@ options:
repository:
type: str
description:
- - The repository from which the gem will be installed.
+ - The repository from which the gem is installed.
required: false
aliases: [source]
user_install:
@@ -65,7 +65,7 @@ options:
install_dir:
type: path
description:
- - Install the gems into a specific directory. These gems will be independent from the global installed ones. Specifying
+ - Install the gems into a specific directory. These gems are independent from the global installed ones. Specifying
this requires user_install to be false.
required: false
bindir:
@@ -295,22 +295,22 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- executable=dict(required=False, type='path'),
- gem_source=dict(required=False, type='path'),
- include_dependencies=dict(required=False, default=True, type='bool'),
+ executable=dict(type='path'),
+ gem_source=dict(type='path'),
+ include_dependencies=dict(default=True, type='bool'),
name=dict(required=True, type='str'),
- repository=dict(required=False, aliases=['source'], type='str'),
- state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
- user_install=dict(required=False, default=True, type='bool'),
- install_dir=dict(required=False, type='path'),
+ repository=dict(aliases=['source'], type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(default=True, type='bool'),
+ install_dir=dict(type='path'),
bindir=dict(type='path'),
norc=dict(type='bool', default=True),
- pre_release=dict(required=False, default=False, type='bool'),
- include_doc=dict(required=False, default=False, type='bool'),
- env_shebang=dict(required=False, default=False, type='bool'),
- version=dict(required=False, type='str'),
- build_flags=dict(required=False, type='str'),
- force=dict(required=False, default=False, type='bool'),
+ pre_release=dict(default=False, type='bool'),
+ include_doc=dict(default=False, type='bool'),
+ env_shebang=dict(default=False, type='bool'),
+ version=dict(type='str'),
+ build_flags=dict(type='str'),
+ force=dict(default=False, type='bool'),
),
supports_check_mode=True,
mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py
index 8ac4b34838..b8864ea3e3 100644
--- a/plugins/modules/gio_mime.py
+++ b/plugins/modules/gio_mime.py
@@ -26,12 +26,12 @@ attributes:
options:
mime_type:
description:
- - MIME type for which a default handler will be set.
+ - MIME type for which a default handler is set.
type: str
required: true
handler:
description:
- - Default handler will be set for the MIME type.
+ - The default handler to set for the MIME type.
type: str
required: true
notes:
@@ -61,18 +61,12 @@ handler:
returned: success
type: str
sample: google-chrome.desktop
-stdout:
- description:
- - The output of the C(gio) command.
- returned: success
+version:
+ description: Version of gio.
type: str
- sample: Set google-chrome.desktop as the default for x-scheme-handler/https
-stderr:
- description:
- - The error output of the C(gio) command.
- returned: failure
- type: str
- sample: 'gio: Failed to load info for handler "never-existed.desktop"'
+ returned: always
+ sample: "2.80.0"
+ version_added: 10.0.0
"""
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
@@ -88,21 +82,22 @@ class GioMime(ModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gio_mime_runner(self.module, check_rc=True)
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip()
self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
def __run__(self):
check_mode_return = (0, 'Module executed in check mode', '')
if self.vars.has_changed:
- with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
+ with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
rc, out, err = ctx.run()
self.vars.stdout = out
self.vars.stderr = err
- if self.verbosity >= 4:
- self.vars.run_info = ctx.run_info
+ self.vars.set("run_info", ctx.run_info, verbosity=4)
def main():
diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py
index 6a6eff0be2..93ca6265b9 100644
--- a/plugins/modules/git_config.py
+++ b/plugins/modules/git_config.py
@@ -31,17 +31,11 @@ attributes:
diff_mode:
support: none
options:
- list_all:
- description:
- - List all settings (optionally limited to a given O(scope)).
- - This option is B(deprecated) and will be removed from community.general 11.0.0. Please use M(community.general.git_config_info)
- instead.
- type: bool
- default: false
name:
description:
- - The name of the setting. If no value is supplied, the value will be read from the config if it has been set.
+ - The name of the setting.
type: str
+ required: true
repo:
description:
- Path to a git repository for reading and writing values from a specific repo.
@@ -57,7 +51,7 @@ options:
- This is required when setting config values.
- If this is set to V(local), you must also specify the O(repo) parameter.
- If this is set to V(file), you must also specify the O(file) parameter.
- - It defaults to system only when not using O(list_all=true).
+ - It defaults to system.
choices: ["file", "local", "global", "system"]
type: str
state:
@@ -70,7 +64,7 @@ options:
value:
description:
- When specifying the name of a single setting, supply a value to set that setting to the given value.
- - From community.general 11.0.0 on, O(value) will be required if O(state=present). To read values, use the M(community.general.git_config_info)
+ - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info)
module instead.
type: str
add_mode:
@@ -144,21 +138,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-config_value:
- description: When O(list_all=false) and value is not set, a string containing the value of the setting in name.
- returned: success
- type: str
- sample: "vim"
-
-config_values:
- description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings.
- returned: success
- type: dict
- sample:
- core.editor: "vim"
- color.ui: "auto"
- alias.diffc: "diff --cached"
- alias.remotev: "remote -v"
"""
from ansible.module_utils.basic import AnsibleModule
@@ -167,21 +146,19 @@ from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
- list_all=dict(required=False, type='bool', default=False, removed_in_version='11.0.0', removed_from_collection='community.general'),
- name=dict(type='str'),
+ name=dict(type='str', required=True),
repo=dict(type='path'),
file=dict(type='path'),
- add_mode=dict(required=False, type='str', default='replace-all', choices=['add', 'replace-all']),
- scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
- state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
- value=dict(required=False),
+ add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']),
+ scope=dict(type='str', choices=['file', 'local', 'global', 'system']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ value=dict(),
),
- mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
required_if=[
('scope', 'local', ['repo']),
- ('scope', 'file', ['file'])
+ ('scope', 'file', ['file']),
+ ('state', 'present', ['value']),
],
- required_one_of=[['list_all', 'name']],
supports_check_mode=True,
)
git_path = module.get_bin_path('git', True)
@@ -196,13 +173,8 @@ def main():
new_value = params['value'] or ''
add_mode = params['add_mode']
- if not unset and not new_value and not params['list_all']:
- module.deprecate(
- 'If state=present, a value must be specified from community.general 11.0.0 on.'
- ' To read a config value, use the community.general.git_config_info module instead.',
- version='11.0.0',
- collection_name='community.general',
- )
+ if not unset and not new_value:
+ module.fail_json(msg="If state=present, a value must be specified. Use the community.general.git_config_info module to read a config value.")
scope = determine_scope(params)
cwd = determine_cwd(scope, params)
@@ -217,33 +189,18 @@ def main():
list_args = list(base_args)
- if params['list_all']:
- list_args.append('-l')
-
- if name:
- list_args.append("--get-all")
- list_args.append(name)
+ list_args.append("--get-all")
+ list_args.append(name)
(rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False)
- if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
- # This just means nothing has been set at the given scope
- module.exit_json(changed=False, msg='', config_values={})
- elif rc >= 2:
+ if rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args))
old_values = out.rstrip().splitlines()
- if params['list_all']:
- config_values = {}
- for value in old_values:
- k, v = value.split('=', 1)
- config_values[k] = v
- module.exit_json(changed=False, msg='', config_values=config_values)
- elif not new_value and not unset:
- module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '')
- elif unset and not out:
+ if unset and not out:
module.exit_json(changed=False, msg='no setting to unset')
elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset:
module.exit_json(changed=False, msg="")
@@ -286,30 +243,22 @@ def main():
def determine_scope(params):
if params['scope']:
return params['scope']
- elif params['list_all']:
- return ""
- else:
- return 'system'
+ return 'system'
def build_diff_value(value):
if not value:
return "\n"
- elif len(value) == 1:
+ if len(value) == 1:
return value[0] + "\n"
- else:
- return value
+ return value
def determine_cwd(scope, params):
if scope == 'local':
return params['repo']
- elif params['list_all'] and params['repo']:
- # Include local settings from a specific repo when listing all available settings
- return params['repo']
- else:
- # Run from root directory to avoid accidentally picking up any local config settings
- return "/"
+ # Run from root directory to avoid accidentally picking up any local config settings
+ return "/"
if __name__ == '__main__':
diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py
index c8152cfa42..29922382de 100644
--- a/plugins/modules/git_config_info.py
+++ b/plugins/modules/git_config_info.py
@@ -26,7 +26,7 @@ options:
name:
description:
- The name of the setting to read.
- - If not provided, all settings will be returned as RV(config_values).
+ - If not provided, all settings are returned as RV(config_values).
type: str
path:
description:
@@ -94,8 +94,8 @@ config_values:
description:
- This is a dictionary mapping a git configuration setting to a list of its values.
- When O(name) is not set, all configuration settings are returned here.
- - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key will
- still be present, and its value will be an empty list.
+ - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is
+ still present, and its value is an empty list.
returned: success
type: dict
sample:
@@ -113,7 +113,7 @@ def main():
argument_spec=dict(
name=dict(type="str"),
path=dict(type="path"),
- scope=dict(required=False, type="str", default="system", choices=["global", "system", "local", "file"]),
+ scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]),
),
required_if=[
("scope", "local", ["path"]),
diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py
index 2e5f9125ad..4ec7fbb769 100644
--- a/plugins/modules/github_deploy_key.py
+++ b/plugins/modules/github_deploy_key.py
@@ -57,8 +57,8 @@ options:
type: str
read_only:
description:
- - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to
- read and write.
+ - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and
+ write.
type: bool
default: true
state:
@@ -301,18 +301,18 @@ class GithubDeployKey(object):
def main():
module = AnsibleModule(
argument_spec=dict(
- github_url=dict(required=False, type='str', default="https://api.github.com"),
+ github_url=dict(type='str', default="https://api.github.com"),
owner=dict(required=True, type='str', aliases=['account', 'organization']),
repo=dict(required=True, type='str', aliases=['repository']),
name=dict(required=True, type='str', aliases=['title', 'label']),
key=dict(required=True, type='str', no_log=False),
- read_only=dict(required=False, type='bool', default=True),
+ read_only=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
- force=dict(required=False, type='bool', default=False),
- username=dict(required=False, type='str'),
- password=dict(required=False, type='str', no_log=True),
- otp=dict(required=False, type='int', no_log=True),
- token=dict(required=False, type='str', no_log=True)
+ force=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ otp=dict(type='int', no_log=True),
+ token=dict(type='str', no_log=True)
),
mutually_exclusive=[
['password', 'token']
diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py
index f3d5863d54..80b0a6bf70 100644
--- a/plugins/modules/github_key.py
+++ b/plugins/modules/github_key.py
@@ -14,6 +14,7 @@ module: github_key
short_description: Manage GitHub access keys
description:
- Creates, removes, or updates GitHub access keys.
+ - Works with both GitHub.com and GitHub Enterprise Server installations.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -44,10 +45,16 @@ options:
type: str
force:
description:
- - The default is V(true), which will replace the existing remote key if it is different than O(pubkey). If V(false),
- the key will only be set if no key with the given O(name) exists.
+ - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). If V(false), the
+ key is only set if no key with the given O(name) exists.
type: bool
default: true
+ api_url:
+ description:
+ - URL to the GitHub API when using your own GitHub Enterprise instance instead of github.com.
+ type: str
+ default: 'https://api.github.com'
+ version_added: "11.0.0"
author: Robert Estelle (@erydo)
"""
@@ -57,20 +64,42 @@ deleted_keys:
description: An array of key objects that were deleted. Only present on state=absent.
type: list
returned: When state=absent
- sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}]
+ sample:
+ [
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
+ ]
matching_keys:
description: An array of keys matching the specified name. Only present on state=present.
type: list
returned: When state=present
- sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}]
+ sample:
+ [
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
+ ]
key:
description: Metadata about the key just created. Only present on state=present.
type: dict
returned: success
- sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}
+ sample:
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
"""
EXAMPLES = r"""
@@ -91,6 +120,14 @@ EXAMPLES = r"""
name: Access Key for Some Machine
token: '{{ github_access_token }}'
pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+
+# GitHub Enterprise Server usage
+- name: Authorize key with GitHub Enterprise
+ community.general.github_key:
+ name: Access Key for Some Machine
+ token: '{{ github_enterprise_token }}'
+ pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+ api_url: 'https://github.company.com/api/v3'
"""
import datetime
@@ -105,9 +142,6 @@ from ansible_collections.community.general.plugins.module_utils.datetime import
)
-API_BASE = 'https://api.github.com'
-
-
class GitHubResponse(object):
def __init__(self, response, info):
self.content = response.read()
@@ -127,9 +161,10 @@ class GitHubResponse(object):
class GitHubSession(object):
- def __init__(self, module, token):
+ def __init__(self, module, token, api_url):
self.module = module
self.token = token
+ self.api_url = api_url.rstrip('/')
def request(self, method, url, data=None):
headers = {
@@ -147,7 +182,7 @@ class GitHubSession(object):
def get_all_keys(session):
- url = API_BASE + '/user/keys'
+ url = session.api_url + '/user/keys'
result = []
while url:
r = session.request('GET', url)
@@ -171,7 +206,7 @@ def create_key(session, name, pubkey, check_mode):
else:
return session.request(
'POST',
- API_BASE + '/user/keys',
+ session.api_url + '/user/keys',
data=json.dumps({'title': name, 'key': pubkey})).json()
@@ -180,7 +215,7 @@ def delete_keys(session, to_delete, check_mode):
return
for key in to_delete:
- session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+ session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"])
def ensure_key_absent(session, name, check_mode):
@@ -228,6 +263,7 @@ def main():
'pubkey': {},
'state': {'choices': ['present', 'absent'], 'default': 'present'},
'force': {'default': True, 'type': 'bool'},
+ 'api_url': {'default': 'https://api.github.com', 'type': 'str'},
}
module = AnsibleModule(
argument_spec=argument_spec,
@@ -239,6 +275,7 @@ def main():
state = module.params['state']
force = module.params['force']
pubkey = module.params.get('pubkey')
+ api_url = module.params.get('api_url')
if pubkey:
pubkey_parts = pubkey.split(' ')
@@ -248,7 +285,7 @@ def main():
elif state == 'present':
module.fail_json(msg='"pubkey" is required when state=present')
- session = GitHubSession(module, token)
+ session = GitHubSession(module, token, api_url)
if state == 'present':
result = ensure_key_present(module, session, name, pubkey, force=force,
check_mode=module.check_mode)
diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py
index 1376bf4f3d..eae2081701 100644
--- a/plugins/modules/github_release.py
+++ b/plugins/modules/github_release.py
@@ -182,13 +182,29 @@ def main():
else:
gh_obj = github3.GitHub()
- # test if we're actually logged in
- if password or login_token:
+ # GitHub's token formats:
+ # - ghp_ - Personal access token (classic)
+ # - github_pat_ - Fine-grained personal access token
+ # - gho_ - OAuth access token
+ # - ghu_ - User access token for a GitHub App
+ # - ghs_ - Installation access token for a GitHub App
+ # - ghr_ - Refresh token for a GitHub App
+ #
+ # References:
+ # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats
+ #
+ # Test if we're actually logged in, but skip this check for some token prefixes
+ SKIPPED_TOKEN_PREFIXES = ['ghs_']
+ if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)):
gh_obj.me()
except github3.exceptions.AuthenticationFailed as e:
module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
details="Please check username and password or token "
"for repository %s" % repo)
+ except github3.exceptions.GitHubError as e:
+ module.fail_json(msg='GitHub API error: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
repository = gh_obj.repository(user, repo)
diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py
index 2d2c6f8588..abaddb3c28 100644
--- a/plugins/modules/github_repo.py
+++ b/plugins/modules/github_repo.py
@@ -72,7 +72,7 @@ options:
organization:
description:
- Organization for the repository.
- - When O(state=present), the repository will be created in the current user profile.
+ - When O(state=present), the repository is created in the current user profile.
type: str
required: false
api_url:
@@ -246,12 +246,12 @@ def main():
password=dict(type='str', no_log=True),
access_token=dict(type='str', no_log=True),
name=dict(type='str', required=True),
- state=dict(type='str', required=False, default="present",
+ state=dict(type='str', default="present",
choices=["present", "absent"]),
- organization=dict(type='str', required=False, default=None),
+ organization=dict(type='str', ),
private=dict(type='bool'),
description=dict(type='str'),
- api_url=dict(type='str', required=False, default='https://api.github.com'),
+ api_url=dict(type='str', default='https://api.github.com'),
force_defaults=dict(type='bool', default=True),
)
module = AnsibleModule(
diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py
index 8608c90bc9..1ae2e71aaa 100644
--- a/plugins/modules/github_webhook.py
+++ b/plugins/modules/github_webhook.py
@@ -32,7 +32,7 @@ options:
- repo
url:
description:
- - URL to which payloads will be delivered.
+ - URL to which payloads are delivered.
type: str
required: true
content_type:
@@ -208,25 +208,16 @@ def main():
argument_spec=dict(
repository=dict(type='str', required=True, aliases=['repo']),
url=dict(type='str', required=True),
- content_type=dict(
- type='str',
- choices=('json', 'form'),
- required=False,
- default='form'),
- secret=dict(type='str', required=False, no_log=True),
- insecure_ssl=dict(type='bool', required=False, default=False),
- events=dict(type='list', elements='str', required=False),
- active=dict(type='bool', required=False, default=True),
- state=dict(
- type='str',
- required=False,
- choices=('absent', 'present'),
- default='present'),
+ content_type=dict(type='str', choices=('json', 'form'), default='form'),
+ secret=dict(type='str', no_log=True),
+ insecure_ssl=dict(type='bool', default=False),
+ events=dict(type='list', elements='str', ),
+ active=dict(type='bool', default=True),
+ state=dict(type='str', choices=('absent', 'present'), default='present'),
user=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- token=dict(type='str', required=False, no_log=True),
- github_url=dict(
- type='str', required=False, default="https://api.github.com")),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ github_url=dict(type='str', default="https://api.github.com")),
mutually_exclusive=(('password', 'token'),),
required_one_of=(("password", "token"),),
required_if=(("state", "present", ("events",)),),
diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py
index 440a373f1d..75315c77aa 100644
--- a/plugins/modules/github_webhook_info.py
+++ b/plugins/modules/github_webhook_info.py
@@ -76,16 +76,17 @@ hooks:
type: list
elements: dict
sample:
- - {
- "has_shared_secret": true,
- "url": "https://jenkins.example.com/ghprbhook/",
- "events": ["issue_comment", "pull_request"],
- "insecure_ssl": "1",
- "content_type": "json",
- "active": true,
- "id": 6206,
- "last_response": {"status": "active", "message": "OK", "code": 200}
- }
+ - has_shared_secret: true
+ url: https://jenkins.example.com/ghprbhook/
+ events: [issue_comment, pull_request]
+ insecure_ssl: "1"
+ content_type: json
+ active: true
+ id: 6206
+ last_response:
+ status: active
+ message: OK
+ code: 200
"""
import traceback
@@ -123,10 +124,10 @@ def main():
argument_spec=dict(
repository=dict(type='str', required=True, aliases=["repo"]),
user=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- token=dict(type='str', required=False, no_log=True),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
github_url=dict(
- type='str', required=False, default="https://api.github.com")),
+ type='str', default="https://api.github.com")),
mutually_exclusive=(('password', 'token'), ),
required_one_of=(("password", "token"), ),
supports_check_mode=True)
diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py
index b32169ef5a..6ed6e6a0c5 100644
--- a/plugins/modules/gitlab_branch.py
+++ b/plugins/modules/gitlab_branch.py
@@ -118,7 +118,7 @@ def main():
argument_spec.update(
project=dict(type='str', required=True),
branch=dict(type='str', required=True),
- ref_branch=dict(type='str', required=False),
+ ref_branch=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py
index 5a2f582357..d116df0714 100644
--- a/plugins/modules/gitlab_deploy_key.py
+++ b/plugins/modules/gitlab_deploy_key.py
@@ -35,7 +35,7 @@ attributes:
options:
project:
description:
- - Id or Full path of project in the form of group/name.
+ - ID or Full path of project in the form of group/name.
required: true
type: str
title:
@@ -55,8 +55,8 @@ options:
default: false
state:
description:
- - When V(present) the deploy key added to the project if it does not exist.
- - When V(absent) it will be removed from the project if it exists.
+ - When V(present) the deploy key is added to the project if it does not exist.
+ - When V(absent) it is removed from the project if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -208,7 +208,7 @@ class GitLabDeployKey(object):
'''
def find_deploy_key(self, project, key_title):
for deploy_key in project.keys.list(**list_all_kwargs):
- if (deploy_key.title == key_title):
+ if deploy_key.title == key_title:
return deploy_key
'''
diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py
index 711318c6d4..d6105642b8 100644
--- a/plugins/modules/gitlab_group.py
+++ b/plugins/modules/gitlab_group.py
@@ -13,8 +13,8 @@ DOCUMENTATION = r"""
module: gitlab_group
short_description: Creates/updates/deletes GitLab Groups
description:
- - When the group does not exist in GitLab, it will be created.
- - When the group does exist and state=absent, the group will be deleted.
+ - When the group does not exist in GitLab, it is created.
+ - When the group does exist and O(state=absent), the group is deleted.
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
@@ -97,12 +97,12 @@ options:
parent:
description:
- Allow to create subgroups.
- - Id or Full path of parent group in the form of group/name.
+ - ID or Full path of parent group in the form of group/name.
type: str
path:
description:
- - The path of the group you want to create, this will be api_url/group_path.
- - If not supplied, the group_name will be used.
+ - The path of the group you want to create, this is O(api_url)/O(path).
+ - If not supplied, O(name) is used.
type: str
prevent_forking_outside_group:
description:
@@ -129,7 +129,7 @@ options:
service_access_tokens_expiration_enforced:
description:
- Service account token expiration.
- - Changes will not affect existing token expiration dates.
+ - Changes do not affect existing token expiration dates.
- Only available for top level groups.
type: bool
version_added: 9.5.0
diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py
index bcf75e056b..0fe6c14af2 100644
--- a/plugins/modules/gitlab_group_access_token.py
+++ b/plugins/modules/gitlab_group_access_token.py
@@ -28,7 +28,7 @@ extends_documentation_fragment:
- community.general.attributes
notes:
- Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens
- will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ are recreated or not is controlled by the O(recreate) option, which defaults to V(never).
- Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards.
- Token matching is done by comparing O(name) option.
attributes:
@@ -55,8 +55,16 @@ options:
type: list
elements: str
aliases: ["scope"]
- choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner",
- "ai_features", "k8s_proxy"]
+ choices:
+ - api
+ - read_api
+ - read_registry
+ - write_registry
+ - read_repository
+ - write_repository
+ - create_runner
+ - ai_features
+ - k8s_proxy
access_level:
description:
- Access level of the access token.
@@ -71,17 +79,17 @@ options:
required: true
recreate:
description:
- - Whether the access token will be recreated if it already exists.
- - When V(never) the token will never be recreated.
- - When V(always) the token will always be recreated.
- - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ - Whether the access token is recreated if it already exists.
+ - When V(never) the token is never recreated.
+ - When V(always) the token is always recreated.
+ - When V(state_change) the token is recreated if there is a difference between desired state and actual state.
type: str
choices: ["never", "always", "state_change"]
default: never
state:
description:
- - When V(present) the access token will be added to the group if it does not exist.
- - When V(absent) it will be removed from the group if it exists.
+ - When V(present) the access token is added to the group if it does not exist.
+ - When V(absent) it is removed from the group if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -185,9 +193,9 @@ class GitLabGroupAccessToken(object):
@param name of the access token
'''
def find_access_token(self, group, name):
- access_tokens = group.access_tokens.list(all=True)
+ access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)]
for access_token in access_tokens:
- if (access_token.name == name):
+ if access_token.name == name:
self.access_token_object = access_token
return False
return False
@@ -237,7 +245,7 @@ def main():
'create_runner',
'ai_features',
'k8s_proxy']),
- access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
expires_at=dict(type='str', required=True),
recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
))
diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py
index 926f4fe20a..10ca467bcd 100644
--- a/plugins/modules/gitlab_group_variable.py
+++ b/plugins/modules/gitlab_group_variable.py
@@ -15,9 +15,9 @@ short_description: Creates, updates, or deletes GitLab groups variables
version_added: 1.2.0
description:
- Creates a group variable if it does not exist.
- - When a group variable does exist, its value will be updated when the values are different.
+ - When a group variable does exist, its value is updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false))
- or will be deleted (O(purge=true)).
+ or are deleted (O(purge=true)).
author:
- Florent Madiot (@scodeman)
requirements:
@@ -52,7 +52,7 @@ options:
type: bool
vars:
description:
- - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a simple key-value pair, masked, raw and protected are set to V(false).
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full
control about whether a value should be masked, raw, protected or both.
- Support for group variables requires GitLab >= 9.5.
@@ -185,22 +185,22 @@ group_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list of variables whose values were changed.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -372,11 +372,11 @@ def main():
argument_spec.update(auth_argument_spec())
argument_spec.update(
group=dict(type='str', required=True),
- purge=dict(type='bool', required=False, default=False),
- vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ purge=dict(type='bool', default=False),
+ vars=dict(type='dict', default=dict(), no_log=True),
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
# KNOWN dict in filter_returned_variables or bad evil will happen
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py
index ccc3310ce5..87c8aa635a 100644
--- a/plugins/modules/gitlab_hook.py
+++ b/plugins/modules/gitlab_hook.py
@@ -35,18 +35,18 @@ attributes:
options:
project:
description:
- - Id or Full path of the project in the form of group/name.
+ - ID or Full path of the project in the form of group/name.
required: true
type: str
hook_url:
description:
- - The url that you want GitLab to post to, this is used as the primary key for updates and deletion.
+ - The URL that you want GitLab to post to, this is used as the primary key for updates and deletion.
required: true
type: str
state:
description:
- - When V(present) the hook will be updated to match the input or created if it does not exist.
- - When V(absent) hook will be deleted if it exists.
+ - When V(present) the hook is updated to match the input or created if it does not exist.
+ - When V(absent) hook is deleted if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -103,15 +103,15 @@ options:
version_added: '8.4.0'
hook_validate_certs:
description:
- - Whether GitLab will do SSL verification when triggering the hook.
+ - Whether GitLab performs SSL verification when triggering the hook.
type: bool
default: false
aliases: [enable_ssl_verification]
token:
description:
- Secret token to validate hook messages at the receiver.
- - If this is present it will always result in a change as it cannot be retrieved from GitLab.
- - Will show up in the X-GitLab-Token HTTP request header.
+ - If this is present it always results in a change as it cannot be retrieved from GitLab.
+ - It shows up in the C(X-GitLab-Token) HTTP request header.
required: false
type: str
"""
@@ -153,7 +153,7 @@ msg:
sample: "Success"
result:
- description: Json parsed response from the server.
+ description: JSON parsed response from the server.
returned: always
type: dict
@@ -271,7 +271,7 @@ class GitLabHook(object):
'''
def find_hook(self, project, hook_url):
for hook in project.hooks.list(**list_all_kwargs):
- if (hook.url == hook_url):
+ if hook.url == hook_url:
return hook
'''
@@ -307,7 +307,7 @@ def main():
job_events=dict(type='bool', default=False),
pipeline_events=dict(type='bool', default=False),
wiki_page_events=dict(type='bool', default=False),
- releases_events=dict(type='bool', default=None),
+ releases_events=dict(type='bool'),
hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
token=dict(type='str', no_log=True),
))
diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py
index be89238eb4..0f2c9b7752 100644
--- a/plugins/modules/gitlab_instance_variable.py
+++ b/plugins/modules/gitlab_instance_variable.py
@@ -16,10 +16,10 @@ short_description: Creates, updates, or deletes GitLab instance variables
version_added: 7.1.0
description:
- Creates a instance variable if it does not exist.
- - When a instance variable does exist, its value will be updated if the values are different.
+ - When a instance variable does exist, its value is updated if the values are different.
- Support for instance variables requires GitLab >= 13.0.
- - Variables which are not mentioned in the modules options, but are present on the GitLab instance, will either stay (O(purge=false))
- or will be deleted (O(purge=true)).
+ - Variables which are not mentioned in the modules options, but are present on the GitLab instance, either stay (O(purge=false))
+ or are deleted (O(purge=true)).
author:
- Benedikt Braunger (@benibr)
requirements:
@@ -74,6 +74,13 @@ options:
- Whether variable value is protected or not.
type: bool
default: false
+ raw:
+ description:
+ - Whether variable value is raw or not.
+ - Support for raw values requires GitLab >= 15.7.
+ type: bool
+ default: false
+ version_added: 10.2.0
variable_type:
description:
- Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
@@ -117,22 +124,22 @@ instance_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list pre-existing variables whose values have been set.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -160,6 +167,7 @@ class GitlabInstanceVariables(object):
"value": var_obj.get('value'),
"masked": var_obj.get('masked'),
"protected": var_obj.get('protected'),
+ "raw": var_obj.get('raw'),
"variable_type": var_obj.get('variable_type'),
}
@@ -227,6 +235,8 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
item['protected'] = False
if item.get('masked') is None:
item['masked'] = False
+ if item.get('raw') is None:
+ item['raw'] = False
if item.get('variable_type') is None:
item['variable_type'] = 'env_var'
@@ -291,12 +301,13 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- purge=dict(type='bool', required=False, default=False),
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ purge=dict(type='bool', default=False),
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
protected=dict(type='bool', default=False),
+ raw=dict(type='bool', default=False),
variable_type=dict(type='str', default='env_var', choices=["env_var", "file"])
)),
state=dict(type='str', default="present", choices=["absent", "present"]),
diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py
index 47b6f072e8..c6bf6f8328 100644
--- a/plugins/modules/gitlab_issue.py
+++ b/plugins/modules/gitlab_issue.py
@@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab issues
version_added: '8.1.0'
description:
- Creates an issue if it does not exist.
- - When an issue does exist, it will be updated if the provided parameters are different.
- - When an issue does exist and O(state=absent), the issue will be deleted.
+ - When an issue does exist, it is updated if the provided parameters are different.
+ - When an issue does exist and O(state=absent), the issue is deleted.
- When multiple issues are detected, the task fails.
- Existing issues are matched based on O(title) and O(state_filter) filters.
author:
@@ -284,13 +284,13 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- assignee_ids=dict(type='list', elements='str', required=False),
- description=dict(type='str', required=False),
- description_path=dict(type='path', required=False),
- issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"], required=False),
- labels=dict(type='list', elements='str', required=False),
- milestone_search=dict(type='str', required=False),
- milestone_group_id=dict(type='str', required=False),
+ assignee_ids=dict(type='list', elements='str'),
+ description=dict(type='str'),
+ description_path=dict(type='path'),
+ issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]),
+ labels=dict(type='list', elements='str'),
+ milestone_search=dict(type='str'),
+ milestone_group_id=dict(type='str'),
project=dict(type='str', required=True),
state=dict(type='str', default="present", choices=["absent", "present"]),
state_filter=dict(type='str', default="opened", choices=["opened", "closed"]),
diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py
index 44fbf1ae02..a139d1fcbd 100644
--- a/plugins/modules/gitlab_label.py
+++ b/plugins/modules/gitlab_label.py
@@ -12,8 +12,8 @@ module: gitlab_label
short_description: Creates/updates/deletes GitLab Labels belonging to project or group
version_added: 8.3.0
description:
- - When a label does not exist, it will be created.
- - When a label does exist, its value will be updated when the values are different.
+ - When a label does not exist, it is created.
+ - When a label does exist, its value is updated when the values are different.
- Labels can be purged.
author:
- "Gabriele Pongelli (@gpongelli)"
@@ -197,22 +197,22 @@ labels:
description: A list of labels which were created.
returned: always
type: list
- sample: ['abcd', 'label-one']
+ sample: ["abcd", "label-one"]
untouched:
description: A list of labels which exist.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
removed:
description: A list of labels which were deleted.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
updated:
description: A list pre-existing labels whose values have been set.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
labels_obj:
description: API object.
returned: success
@@ -348,7 +348,7 @@ def native_python_main(this_gitlab, purge, requested_labels, state, module):
item.pop('description_html')
item.pop('text_color')
item.pop('subscribed')
- # field present only when it's a project's label
+ # field present only when it is a project's label
if 'is_project_label' in item:
item.pop('is_project_label')
item['new_name'] = None
@@ -410,16 +410,16 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- project=dict(type='str', required=False, default=None),
- group=dict(type='str', required=False, default=None),
- purge=dict(type='bool', required=False, default=False),
- labels=dict(type='list', elements='dict', required=False, default=list(),
+ project=dict(type='str'),
+ group=dict(type='str'),
+ purge=dict(type='bool', default=False),
+ labels=dict(type='list', elements='dict', default=list(),
options=dict(
name=dict(type='str', required=True),
- color=dict(type='str', required=False),
- description=dict(type='str', required=False),
- priority=dict(type='int', required=False),
- new_name=dict(type='str', required=False),)
+ color=dict(type='str'),
+ description=dict(type='str'),
+ priority=dict(type='int'),
+ new_name=dict(type='str'),)
),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
@@ -472,7 +472,7 @@ def main():
if state == 'present':
_existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()]
- # color is mandatory when creating label, but it's optional when changing name or updating other fields
+ # color is mandatory when creating label, but it is optional when changing name or updating other fields
if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list):
module.fail_json(msg='color parameter is required for new labels')
diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py
index fd6068980a..922b224c1f 100644
--- a/plugins/modules/gitlab_merge_request.py
+++ b/plugins/modules/gitlab_merge_request.py
@@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab merge requests
version_added: 7.1.0
description:
- Creates a merge request if it does not exist.
- - When a single merge request does exist, it will be updated if the provided parameters are different.
- - When a single merge request does exist and O(state=absent), the merge request will be deleted.
+ - When a single merge request does exist, it is updated if the provided parameters are different.
+ - When a single merge request does exist and O(state=absent), the merge request is deleted.
- When multiple merge requests are detected, the task fails.
- Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters.
author:
@@ -287,13 +287,13 @@ def main():
source_branch=dict(type='str', required=True),
target_branch=dict(type='str', required=True),
title=dict(type='str', required=True),
- description=dict(type='str', required=False),
- labels=dict(type='str', default="", required=False),
- description_path=dict(type='path', required=False),
- remove_source_branch=dict(type='bool', default=False, required=False),
+ description=dict(type='str'),
+ labels=dict(type='str', default=""),
+ description_path=dict(type='path'),
+ remove_source_branch=dict(type='bool', default=False),
state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]),
- assignee_ids=dict(type='str', required=False),
- reviewer_ids=dict(type='str', required=False),
+ assignee_ids=dict(type='str'),
+ reviewer_ids=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py
index 99b922c4dd..1406f96ffb 100644
--- a/plugins/modules/gitlab_milestone.py
+++ b/plugins/modules/gitlab_milestone.py
@@ -12,8 +12,8 @@ module: gitlab_milestone
short_description: Creates/updates/deletes GitLab Milestones belonging to project or group
version_added: 8.3.0
description:
- - When a milestone does not exist, it will be created.
- - When a milestone does exist, its value will be updated when the values are different.
+ - When a milestone does not exist, it is created.
+ - When a milestone does exist, its value is updated when the values are different.
- Milestones can be purged.
author:
- "Gabriele Pongelli (@gpongelli)"
@@ -181,22 +181,22 @@ milestones:
description: A list of milestones which were created.
returned: always
type: list
- sample: ['abcd', 'milestone-one']
+ sample: ["abcd", "milestone-one"]
untouched:
description: A list of milestones which exist.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
removed:
description: A list of milestones which were deleted.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
updated:
description: A list pre-existing milestones whose values have been set.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
milestones_obj:
description: API object.
returned: success
@@ -411,15 +411,15 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- project=dict(type='str', required=False, default=None),
- group=dict(type='str', required=False, default=None),
- purge=dict(type='bool', required=False, default=False),
- milestones=dict(type='list', elements='dict', required=False, default=list(),
+ project=dict(type='str'),
+ group=dict(type='str'),
+ purge=dict(type='bool', default=False),
+ milestones=dict(type='list', elements='dict', default=list(),
options=dict(
title=dict(type='str', required=True),
- description=dict(type='str', required=False),
- due_date=dict(type='str', required=False),
- start_date=dict(type='str', required=False),)
+ description=dict(type='str'),
+ due_date=dict(type='str'),
+ start_date=dict(type='str'),)
),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py
index 942e1d9816..1e2140e24a 100644
--- a/plugins/modules/gitlab_project.py
+++ b/plugins/modules/gitlab_project.py
@@ -13,9 +13,9 @@ DOCUMENTATION = r"""
module: gitlab_project
short_description: Creates/updates/deletes GitLab Projects
description:
- - When the project does not exist in GitLab, it will be created.
- - When the project does exist and O(state=absent), the project will be deleted.
- - When changes are made to the project, the project will be updated.
+ - When the project does not exist in GitLab, it is created.
+ - When the project does exist and O(state=absent), the project is deleted.
+ - When changes are made to the project, the project is updated.
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
@@ -44,6 +44,12 @@ options:
- This option is only used on creation, not for updates.
type: path
version_added: "4.2.0"
+ build_timeout:
+ description:
+ - Maximum number of seconds a CI job can run.
+ - If not specified on creation, GitLab imposes a default value.
+ type: int
+ version_added: "10.6.0"
builds_access_level:
description:
- V(private) means that repository CI/CD is allowed only to project members.
@@ -138,11 +144,11 @@ options:
version_added: "6.2.0"
group:
description:
- - Id or the full path of the group of which this projects belongs to.
+ - ID or the full path of the group of which this projects belongs to.
type: str
import_url:
description:
- - Git repository which will be imported into gitlab.
+ - Git repository which is imported into gitlab.
- GitLab server needs read access to this git repository.
required: false
type: str
@@ -156,7 +162,7 @@ options:
version_added: "6.4.0"
initialize_with_readme:
description:
- - Will initialize the project with a default C(README.md).
+ - Initializes the project with a default C(README.md).
- Is only used when the project is created, and ignored otherwise.
type: bool
default: false
@@ -242,8 +248,8 @@ options:
version_added: "9.3.0"
path:
description:
- - The path of the project you want to create, this will be server_url//path.
- - If not supplied, name will be used.
+ - The path of the project you want to create, this is server_url/O(group)/O(path).
+ - If not supplied, O(name) is used.
type: str
releases_access_level:
description:
@@ -430,6 +436,7 @@ class GitLabProject(object):
project_options = {
'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'],
'builds_access_level': options['builds_access_level'],
+ 'build_timeout': options['build_timeout'],
'ci_config_path': options['ci_config_path'],
'container_expiration_policy': options['container_expiration_policy'],
'container_registry_access_level': options['container_registry_access_level'],
@@ -591,8 +598,9 @@ def main():
allow_merge_on_skipped_pipeline=dict(type='bool'),
avatar_path=dict(type='path'),
builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ build_timeout=dict(type='int'),
ci_config_path=dict(type='str'),
- container_expiration_policy=dict(type='dict', default=None, options=dict(
+ container_expiration_policy=dict(type='dict', options=dict(
cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]),
enabled=dict(type='bool'),
keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]),
@@ -664,6 +672,7 @@ def main():
allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline']
avatar_path = module.params['avatar_path']
builds_access_level = module.params['builds_access_level']
+ build_timeout = module.params['build_timeout']
ci_config_path = module.params['ci_config_path']
container_expiration_policy = module.params['container_expiration_policy']
container_registry_access_level = module.params['container_registry_access_level']
@@ -748,6 +757,7 @@ def main():
"allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline,
"avatar_path": avatar_path,
"builds_access_level": builds_access_level,
+ "build_timeout": build_timeout,
"ci_config_path": ci_config_path,
"container_expiration_policy": container_expiration_policy,
"container_registry_access_level": container_registry_access_level,
diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py
index a93d5531bf..3747870d9a 100644
--- a/plugins/modules/gitlab_project_access_token.py
+++ b/plugins/modules/gitlab_project_access_token.py
@@ -28,7 +28,7 @@ extends_documentation_fragment:
- community.general.attributes
notes:
- Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens
- will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ are recreated or not is controlled by the O(recreate) option, which defaults to V(never).
- Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards.
- Token matching is done by comparing O(name) option.
attributes:
@@ -55,8 +55,16 @@ options:
type: list
elements: str
aliases: ["scope"]
- choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner",
- "ai_features", "k8s_proxy"]
+ choices:
+ - api
+ - read_api
+ - read_registry
+ - write_registry
+ - read_repository
+ - write_repository
+ - create_runner
+ - ai_features
+ - k8s_proxy
access_level:
description:
- Access level of the access token.
@@ -71,17 +79,17 @@ options:
required: true
recreate:
description:
- - Whether the access token will be recreated if it already exists.
- - When V(never) the token will never be recreated.
- - When V(always) the token will always be recreated.
- - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ - Whether the access token is recreated if it already exists.
+ - When V(never) the token is never recreated.
+ - When V(always) the token is always recreated.
+ - When V(state_change) the token is recreated if there is a difference between desired state and actual state.
type: str
choices: ["never", "always", "state_change"]
default: never
state:
description:
- - When V(present) the access token will be added to the project if it does not exist.
- - When V(absent) it will be removed from the project if it exists.
+ - When V(present) the access token is added to the project if it does not exist.
+ - When V(absent) it is removed from the project if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -183,9 +191,9 @@ class GitLabProjectAccessToken(object):
@param name of the access token
'''
def find_access_token(self, project, name):
- access_tokens = project.access_tokens.list(all=True)
+ access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)]
for access_token in access_tokens:
- if (access_token.name == name):
+ if access_token.name == name:
self.access_token_object = access_token
return False
return False
@@ -235,7 +243,7 @@ def main():
'create_runner',
'ai_features',
'k8s_proxy']),
- access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
expires_at=dict(type='str', required=True),
recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
))
diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py
index 228af9a062..b5d0f6e2d1 100644
--- a/plugins/modules/gitlab_project_members.py
+++ b/plugins/modules/gitlab_project_members.py
@@ -48,8 +48,9 @@ options:
description:
- The access level for the user.
- Required if O(state=present), user state is set to present.
+ - V(owner) was added in community.general 10.6.0.
type: str
- choices: ['guest', 'reporter', 'developer', 'maintainer']
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
gitlab_users_access:
description:
- Provide a list of user to access level mappings.
@@ -67,8 +68,9 @@ options:
description:
- The access level for the user.
- Required if O(state=present), user state is set to present.
+ - V(owner) was added in community.general 10.6.0.
type: str
- choices: ['guest', 'reporter', 'developer', 'maintainer']
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
required: true
version_added: 3.7.0
state:
@@ -84,9 +86,10 @@ options:
- Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted
do not purge orphaned members.
- Is only used when O(state=present).
+ - V(owner) was added in community.general 10.6.0.
type: list
elements: str
- choices: ['guest', 'reporter', 'developer', 'maintainer']
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
version_added: 3.7.0
"""
@@ -239,16 +242,16 @@ def main():
project=dict(type='str', required=True),
gitlab_user=dict(type='list', elements='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
- access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']),
+ access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
purge_users=dict(type='list', elements='str', choices=[
- 'guest', 'reporter', 'developer', 'maintainer']),
+ 'guest', 'reporter', 'developer', 'maintainer', 'owner']),
gitlab_users_access=dict(
type='list',
elements='dict',
options=dict(
name=dict(type='str', required=True),
access_level=dict(type='str', choices=[
- 'guest', 'reporter', 'developer', 'maintainer'], required=True),
+ 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True),
)
),
))
@@ -286,6 +289,7 @@ def main():
'reporter': gitlab.const.REPORTER_ACCESS,
'developer': gitlab.const.DEVELOPER_ACCESS,
'maintainer': gitlab.const.MAINTAINER_ACCESS,
+ 'owner': gitlab.const.OWNER_ACCESS,
}
gitlab_project = module.params['project']
diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py
index 5903c9b5c4..4c261f5978 100644
--- a/plugins/modules/gitlab_project_variable.py
+++ b/plugins/modules/gitlab_project_variable.py
@@ -11,10 +11,10 @@ DOCUMENTATION = r"""
module: gitlab_project_variable
short_description: Creates/updates/deletes GitLab Projects Variables
description:
- - When a project variable does not exist, it will be created.
- - When a project variable does exist, its value will be updated when the values are different.
+ - When a project variable does not exist, it is created.
+ - When a project variable does exist, its value is updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab project, they stay untouched (O(purge=false))
- or will be deleted (O(purge=true)).
+ or are deleted (O(purge=true)).
author:
- "Markus Bergholz (@markuman)"
requirements:
@@ -45,12 +45,12 @@ options:
type: str
purge:
description:
- - When set to true, all variables which are not untouched in the task will be deleted.
+ - When set to V(true), all variables which are not mentioned in the task are deleted.
default: false
type: bool
vars:
description:
- - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a simple key-value pair, masked, raw and protected are set to V(false).
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full
control about whether a value should be masked, raw, protected or both.
- Support for protected values requires GitLab >= 9.3.
@@ -202,22 +202,22 @@ project_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list of variables whose values were changed.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -394,11 +394,11 @@ def main():
argument_spec.update(auth_argument_spec())
argument_spec.update(
project=dict(type='str', required=True),
- purge=dict(type='bool', required=False, default=False),
- vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ purge=dict(type='bool', default=False),
+ vars=dict(type='dict', default=dict(), no_log=True),
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
# KNOWN dict in filter_returned_variables or bad evil will happen
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py
index 62875c552a..87ba152ffa 100644
--- a/plugins/modules/gitlab_runner.py
+++ b/plugins/modules/gitlab_runner.py
@@ -81,8 +81,8 @@ options:
description:
- The registration token is used to register new runners before GitLab 16.0.
- Required if O(state=present) for GitLab < 16.0.
- - If set, the runner will be created using the old runner creation workflow.
- - If not set, the runner will be created using the new runner creation workflow, introduced in GitLab 16.0.
+ - If set, the runner is created using the old runner creation workflow.
+ - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0.
- If not set, requires python-gitlab >= 4.0.0.
type: str
owned:
@@ -122,8 +122,8 @@ options:
- If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches.
- If set to V(ref_protected), runner can pick up jobs only from protected branches.
- Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general
- 8.0.0. If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set
- will not be changed on any updates.
+ 8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is
+ not changed on any updates.
required: false
choices: ["not_protected", "ref_protected"]
type: str
@@ -393,10 +393,10 @@ class GitLabRunner(object):
# python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
# object, so we need to handle both
if hasattr(runner, "description"):
- if (runner.description == description):
+ if runner.description == description:
return self._gitlab.runners.get(runner.id)
else:
- if (runner['description'] == description):
+ if runner['description'] == description:
return self._gitlab.runners.get(runner['id'])
'''
diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py
index dd8685312d..4d7bd506f6 100644
--- a/plugins/modules/gitlab_user.py
+++ b/plugins/modules/gitlab_user.py
@@ -14,10 +14,10 @@ DOCUMENTATION = r"""
module: gitlab_user
short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
description:
- - When the user does not exist in GitLab, it will be created.
- - When the user exists and state=absent, the user will be deleted.
- - When the user exists and state=blocked, the user will be blocked.
- - When changes are made to user, the user will be updated.
+ - When the user does not exist in GitLab, it is created.
+ - When the user exists and state=absent, the user is deleted.
+ - When the user exists and state=blocked, the user is blocked.
+ - When changes are made to the user, the user is updated.
notes:
- From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
author:
@@ -82,7 +82,7 @@ options:
version_added: 3.1.0
group:
description:
- - Id or Full path of parent group in the form of group/name.
+ - ID or Full path of parent group in the form of group/name.
- Add user as a member to this group.
type: str
access_level:
diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py
index abdc303f90..81417657c8 100644
--- a/plugins/modules/grove.py
+++ b/plugins/modules/grove.py
@@ -51,8 +51,8 @@ options:
required: false
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
default: true
type: bool
author: "Jonas Pfenniger (@zimbatm)"
@@ -101,8 +101,8 @@ def main():
channel_token=dict(type='str', required=True, no_log=True),
message_content=dict(type='str', required=True),
service=dict(type='str', default='ansible'),
- url=dict(type='str', default=None),
- icon_url=dict(type='str', default=None),
+ url=dict(type='str'),
+ icon_url=dict(type='str'),
validate_certs=dict(default=True, type='bool'),
)
)
diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py
index baf24c53b8..b524165c90 100644
--- a/plugins/modules/gunicorn.py
+++ b/plugins/modules/gunicorn.py
@@ -50,8 +50,8 @@ options:
pid:
type: path
description:
- - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file will be created
- to check a successful run of gunicorn.
+ - A filename to use for the PID file. If not set and not found in the configuration file, a tmp PID file is created to
+ check a successful run of gunicorn.
worker:
type: str
choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']
@@ -62,8 +62,8 @@ options:
description:
- Switch worker processes to run as this user.
notes:
- - If not specified on config file, a temporary error log will be created on /tmp dir. Please make sure you have write access
- in /tmp dir. Not needed but will help you to identify any problem with configuration.
+ - If not specified in the config file, a temporary error log is created in the C(/tmp) directory. Please make sure you have write
+ access in C(/tmp) directory. Not needed but it is helpful to identify any problem with configuration.
"""
EXAMPLES = r"""
@@ -96,7 +96,7 @@ EXAMPLES = r"""
RETURN = r"""
gunicorn:
- description: Process id of gunicorn.
+ description: Process ID of gunicorn.
returned: changed
type: str
sample: "1234"
diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py
index 9c60e59040..b0e56de061 100644
--- a/plugins/modules/haproxy.py
+++ b/plugins/modules/haproxy.py
@@ -32,7 +32,7 @@ options:
backend:
description:
- Name of the HAProxy backend pool.
- - If this parameter is unset, it will be auto-detected.
+ - If this parameter is unset, it is auto-detected.
type: str
drain:
description:
@@ -62,8 +62,7 @@ options:
state:
description:
- Desired state of the provided backend host.
- - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it will be
- ignored.
+ - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored.
type: str
required: true
choices: [disabled, drain, enabled]
@@ -103,7 +102,7 @@ options:
weight:
description:
- The value passed in argument.
- - If the value ends with the V(%) sign, then the new weight will be relative to the initially configured weight.
+ - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight.
- Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256.
type: str
"""
diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py
index fd539ba54d..982364504c 100644
--- a/plugins/modules/hg.py
+++ b/plugins/modules/hg.py
@@ -61,16 +61,16 @@ options:
default: true
executable:
description:
- - Path to hg executable to use. If not supplied, the normal mechanism for resolving binary paths will be used.
+ - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
type: str
notes:
- This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
- - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the
- first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
+ - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH prompts user to authorize the first
+ contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).'
- As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system
- still uses a Python version below 2.7.9, you will have issues checking out bitbucket repositories.
- See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+ still uses a Python version below 2.7.9, you are bound to have issues checking out bitbucket repositories. See
+ U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
"""
EXAMPLES = r"""
@@ -205,7 +205,7 @@ class Hg(object):
if the desired changeset is already the current changeset.
"""
if self.revision is None or len(self.revision) < 7:
- # Assume it's a rev number, tag, or branch
+ # Assume it is a rev number, tag, or branch
return False
(rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
if rc != 0:
@@ -222,12 +222,12 @@ def main():
argument_spec=dict(
repo=dict(type='str', required=True, aliases=['name']),
dest=dict(type='path'),
- revision=dict(type='str', default=None, aliases=['version']),
+ revision=dict(type='str', aliases=['version']),
force=dict(type='bool', default=False),
purge=dict(type='bool', default=False),
update=dict(type='bool', default=True),
clone=dict(type='bool', default=True),
- executable=dict(type='str', default=None),
+ executable=dict(type='str'),
),
)
repo = module.params['repo']
diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py
deleted file mode 100644
index e605278507..0000000000
--- a/plugins/modules/hipchat.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: hipchat
-short_description: Send a message to Hipchat
-description:
- - Send a message to a Hipchat room, with options to control the formatting.
-extends_documentation_fragment:
- - community.general.attributes
-deprecated:
- removed_in: 11.0.0
- why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
- alternative: There is none.
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- token:
- type: str
- description:
- - API token.
- required: true
- room:
- type: str
- description:
- - ID or name of the room.
- required: true
- msg_from:
- type: str
- description:
- - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated.
- default: Ansible
- aliases: [from]
- msg:
- type: str
- description:
- - The message body.
- required: true
- color:
- type: str
- description:
- - Background color for the message.
- default: yellow
- choices: ["yellow", "red", "green", "purple", "gray", "random"]
- msg_format:
- type: str
- description:
- - Message format.
- default: text
- choices: ["text", "html"]
- notify:
- description:
- - If true, a notification will be triggered for users in the room.
- type: bool
- default: true
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
- type: bool
- default: true
- api:
- type: str
- description:
- - API url if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of
- C(/v1).
- default: 'https://api.hipchat.com/v1'
-
-author:
- - Shirou Wakayama (@shirou)
- - Paul Bourdel (@pb8226)
-"""
-
-EXAMPLES = r"""
-- name: Send a message to a Hipchat room
- community.general.hipchat:
- room: notif
- msg: Ansible task finished
-
-- name: Send a message to a Hipchat room using Hipchat API version 2
- community.general.hipchat:
- api: https://api.hipchat.com/v2/
- token: OAUTH2_TOKEN
- room: notify
- msg: Ansible task finished
-"""
-
-# ===========================================
-# HipChat module specific support methods.
-#
-
-import json
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.six.moves.urllib.request import pathname2url
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.urls import fetch_url
-
-
-DEFAULT_URI = "https://api.hipchat.com/v1"
-
-MSG_URI_V1 = "/rooms/message"
-
-NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
-
-
-def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=MSG_URI_V1):
- '''sending message to hipchat v1 server'''
-
- params = {}
- params['room_id'] = room
- params['from'] = msg_from[:15] # max length is 15
- params['message'] = msg
- params['message_format'] = msg_format
- params['color'] = color
- params['api'] = api
- params['notify'] = int(notify)
-
- url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
- data = urlencode(params)
-
- if module.check_mode:
- # In check mode, exit before actually sending the message
- module.exit_json(changed=False)
-
- response, info = fetch_url(module, url, data=data)
- if info['status'] == 200:
- return response.read()
- else:
- module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
-
-
-def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=NOTIFY_URI_V2):
- '''sending message to hipchat v2 server'''
-
- headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
-
- body = dict()
- body['message'] = msg
- body['color'] = color
- body['message_format'] = msg_format
- body['notify'] = notify
-
- POST_URL = api + NOTIFY_URI_V2
-
- url = POST_URL.replace('{id_or_name}', pathname2url(room))
- data = json.dumps(body)
-
- if module.check_mode:
- # In check mode, exit before actually sending the message
- module.exit_json(changed=False)
-
- response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
-
- # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
- # 204 to be the expected result code.
- if info['status'] in [200, 204]:
- return response.read()
- else:
- module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
-
-
-# ===========================================
-# Module execution.
-#
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- token=dict(required=True, no_log=True),
- room=dict(required=True),
- msg=dict(required=True),
- msg_from=dict(default="Ansible", aliases=['from']),
- color=dict(default="yellow", choices=["yellow", "red", "green",
- "purple", "gray", "random"]),
- msg_format=dict(default="text", choices=["text", "html"]),
- notify=dict(default=True, type='bool'),
- validate_certs=dict(default=True, type='bool'),
- api=dict(default=DEFAULT_URI),
- ),
- supports_check_mode=True
- )
-
- token = module.params["token"]
- room = str(module.params["room"])
- msg = module.params["msg"]
- msg_from = module.params["msg_from"]
- color = module.params["color"]
- msg_format = module.params["msg_format"]
- notify = module.params["notify"]
- api = module.params["api"]
-
- try:
- if api.find('/v2') != -1:
- send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
- else:
- send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
- except Exception as e:
- module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
-
- changed = True
- module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py
index eaf7d652b4..021f990e67 100644
--- a/plugins/modules/homebrew.py
+++ b/plugins/modules/homebrew.py
@@ -83,7 +83,7 @@ options:
default: false
version_added: 9.0.0
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly
to the O(name) option.
"""
@@ -173,7 +173,7 @@ changed_pkgs:
- List of package names which are changed after module run.
returned: success
type: list
- sample: ['git', 'git-cola']
+ sample: ["git", "git-cola"]
version_added: '0.2.0'
"""
@@ -307,21 +307,6 @@ class Homebrew(object):
self._params = self.module.params
return self._params
- @property
- def current_package(self):
- return self._current_package
-
- @current_package.setter
- def current_package(self, package):
- if not HomebrewValidate.valid_package(package):
- self._current_package = None
- self.failed = True
- self.message = 'Invalid package: {0}.'.format(package)
- raise HomebrewException(self.message)
-
- else:
- self._current_package = package
- return package
# /class properties -------------------------------------------- }}}
def __init__(self, module, path, packages=None, state=None,
@@ -346,13 +331,13 @@ class Homebrew(object):
def _setup_status_vars(self):
self.failed = False
self.changed = False
- self.changed_count = 0
- self.unchanged_count = 0
self.changed_pkgs = []
self.unchanged_pkgs = []
self.message = ''
def _setup_instance_vars(self, **kwargs):
+ self.installed_packages = set()
+ self.outdated_packages = set()
for key, val in iteritems(kwargs):
setattr(self, key, val)
@@ -379,8 +364,86 @@ class Homebrew(object):
return self.brew_path
- def _status(self):
- return (self.failed, self.changed, self.message)
+ def _validate_packages_names(self):
+ invalid_packages = []
+ for package in self.packages:
+ if not HomebrewValidate.valid_package(package):
+ invalid_packages.append(package)
+
+ if invalid_packages:
+ self.failed = True
+ self.message = 'Invalid package{0}: {1}'.format(
+ "s" if len(invalid_packages) > 1 else "",
+ ", ".join(invalid_packages),
+ )
+ raise HomebrewException(self.message)
+
+ def _save_package_info(self, package_detail, package_name):
+ if bool(package_detail.get("installed")):
+ self.installed_packages.add(package_name)
+ if bool(package_detail.get("outdated")):
+ self.outdated_packages.add(package_name)
+
+ def _extract_package_name(self, package_detail, is_cask):
+ # "brew info" can lookup by name, full_name, token, full_token, or aliases
+ # In addition, any name can be prefixed by the tap.
+ # Any of these can be supplied by the user as the package name. In case
+ # of ambiguity, where a given name might match multiple packages,
+ # formulae are preferred over casks. For all other ambiguities, the
+ # results are an error. Note that in the homebrew/core and
+ # homebrew/cask taps, there are no "other" ambiguities.
+ if is_cask: # according to brew info
+ name = package_detail["token"]
+ full_name = package_detail["full_token"]
+ else:
+ name = package_detail["name"]
+ full_name = package_detail["full_name"]
+
+ # Issue https://github.com/ansible-collections/community.general/issues/9803:
+ # name can include the tap as a prefix, in order to disambiguate,
+ # e.g. casks from identically named formulae.
+ #
+ # Issue https://github.com/ansible-collections/community.general/issues/10012:
+ # package_detail["tap"] is None if package is no longer available.
+ tapped_name = [package_detail["tap"] + "/" + name] if package_detail["tap"] else []
+ aliases = package_detail.get("aliases", [])
+ package_names = set([name, full_name] + tapped_name + aliases)
+
+ # Finally, identify which of all those package names was the one supplied by the user.
+ package_names = package_names & set(self.packages)
+ if len(package_names) != 1:
+ self.failed = True
+ self.message = "Package names are missing or ambiguous: " + ", ".join(str(p) for p in package_names)
+ raise HomebrewException(self.message)
+
+ # Then make sure the user provided name resurface.
+ return package_names.pop()
+
+ def _get_packages_info(self):
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ "--json=v2",
+ ]
+ cmd.extend(self.packages)
+ if self.force_formula:
+ cmd.append("--formula")
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.failed = True
+ self.message = err.strip() or ("Unknown failure with exit code %d" % rc)
+ raise HomebrewException(self.message)
+
+ data = json.loads(out)
+ for package_detail in data.get("formulae", []):
+ package_name = self._extract_package_name(package_detail, is_cask=False)
+ self._save_package_info(package_detail, package_name)
+
+ for package_detail in data.get("casks", []):
+ package_name = self._extract_package_name(package_detail, is_cask=True)
+ self._save_package_info(package_detail, package_name)
+
# /prep -------------------------------------------------------- }}}
def run(self):
@@ -389,70 +452,14 @@ class Homebrew(object):
except HomebrewException:
pass
- if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ changed_count = len(self.changed_pkgs)
+ unchanged_count = len(self.unchanged_pkgs)
+ if not self.failed and (changed_count + unchanged_count > 1):
self.message = "Changed: %d, Unchanged: %d" % (
- self.changed_count,
- self.unchanged_count,
+ changed_count,
+ unchanged_count,
)
- (failed, changed, message) = self._status()
-
- return (failed, changed, message)
-
- # checks ------------------------------------------------------- {{{
- def _current_package_is_installed(self):
- if not HomebrewValidate.valid_package(self.current_package):
- self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
-
- cmd = [
- "{brew_path}".format(brew_path=self.brew_path),
- "info",
- "--json=v2",
- self.current_package,
- ]
- if self.force_formula:
- cmd.append("--formula")
- rc, out, err = self.module.run_command(cmd)
- if rc != 0:
- self.failed = True
- self.message = err.strip() or ("Unknown failure with exit code %d" % rc)
- raise HomebrewException(self.message)
- data = json.loads(out)
-
- return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks")
-
- def _current_package_is_outdated(self):
- if not HomebrewValidate.valid_package(self.current_package):
- return False
-
- rc, out, err = self.module.run_command([
- self.brew_path,
- 'outdated',
- self.current_package,
- ])
-
- return rc != 0
-
- def _current_package_is_installed_from_head(self):
- if not HomebrewValidate.valid_package(self.current_package):
- return False
- elif not self._current_package_is_installed():
- return False
-
- rc, out, err = self.module.run_command([
- self.brew_path,
- 'info',
- self.current_package,
- ])
-
- try:
- version_info = [line for line in out.split('\n') if line][0]
- except IndexError:
- return False
-
- return version_info.split(' ')[-1] == 'HEAD'
- # /checks ------------------------------------------------------ }}}
+ return (self.failed, self.changed, self.message)
# commands ----------------------------------------------------- {{{
def _run(self):
@@ -463,6 +470,8 @@ class Homebrew(object):
self._upgrade_all()
if self.packages:
+ self._validate_packages_names()
+ self._get_packages_info()
if self.state == 'installed':
return self._install_packages()
elif self.state == 'upgraded':
@@ -532,24 +541,22 @@ class Homebrew(object):
# /_upgrade_all -------------------------- }}}
# installed ------------------------------ {{{
- def _install_current_package(self):
- if not HomebrewValidate.valid_package(self.current_package):
- self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
+ def _install_packages(self):
+ packages_to_install = set(self.packages) - self.installed_packages
- if self._current_package_is_installed():
- self.unchanged_count += 1
- self.unchanged_pkgs.append(self.current_package)
- self.message = 'Package already installed: {0}'.format(
- self.current_package,
+ if len(packages_to_install) == 0:
+ self.unchanged_pkgs.extend(self.packages)
+ self.message = 'Package{0} already installed: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages),
)
return True
if self.module.check_mode:
self.changed = True
- self.message = 'Package would be installed: {0}'.format(
- self.current_package
+ self.message = 'Package{0} would be installed: {1}'.format(
+ "s" if len(packages_to_install) > 1 else "",
+ ", ".join(packages_to_install)
)
raise HomebrewException(self.message)
@@ -566,76 +573,28 @@ class Homebrew(object):
opts = (
[self.brew_path, 'install']
+ self.install_options
- + [self.current_package, head, formula]
+ + list(packages_to_install)
+ + [head, formula]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
- if self._current_package_is_installed():
- self.changed_count += 1
- self.changed_pkgs.append(self.current_package)
+ if rc == 0:
+ self.changed_pkgs.extend(packages_to_install)
+ self.unchanged_pkgs.extend(self.installed_packages)
self.changed = True
- self.message = 'Package installed: {0}'.format(self.current_package)
+ self.message = 'Package{0} installed: {1}'.format(
+ "s" if len(packages_to_install) > 1 else "",
+ ", ".join(packages_to_install)
+ )
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
-
- def _install_packages(self):
- for package in self.packages:
- self.current_package = package
- self._install_current_package()
-
- return True
# /installed ----------------------------- }}}
# upgraded ------------------------------- {{{
- def _upgrade_current_package(self):
- command = 'upgrade'
-
- if not HomebrewValidate.valid_package(self.current_package):
- self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
-
- if not self._current_package_is_installed():
- command = 'install'
-
- if self._current_package_is_installed() and not self._current_package_is_outdated():
- self.message = 'Package is already upgraded: {0}'.format(
- self.current_package,
- )
- self.unchanged_count += 1
- self.unchanged_pkgs.append(self.current_package)
- return True
-
- if self.module.check_mode:
- self.changed = True
- self.message = 'Package would be upgraded: {0}'.format(
- self.current_package
- )
- raise HomebrewException(self.message)
-
- opts = (
- [self.brew_path, command]
- + self.install_options
- + [self.current_package]
- )
- cmd = [opt for opt in opts if opt]
- rc, out, err = self.module.run_command(cmd)
-
- if self._current_package_is_installed() and not self._current_package_is_outdated():
- self.changed_count += 1
- self.changed_pkgs.append(self.current_package)
- self.changed = True
- self.message = 'Package upgraded: {0}'.format(self.current_package)
- return True
- else:
- self.failed = True
- self.message = err.strip()
- raise HomebrewException(self.message)
-
def _upgrade_all_packages(self):
opts = (
[self.brew_path, 'upgrade']
@@ -657,153 +616,188 @@ class Homebrew(object):
if not self.packages:
self._upgrade_all_packages()
else:
- for package in self.packages:
- self.current_package = package
- self._upgrade_current_package()
- return True
+ # There are 3 action possible here depending on installed and outdated states:
+ # - not installed -> 'install'
+ # - installed and outdated -> 'upgrade'
+ # - installed and NOT outdated -> Nothing to do!
+ packages_to_install = set(self.packages) - self.installed_packages
+ packages_to_upgrade = self.installed_packages & self.outdated_packages
+ packages_to_install_or_upgrade = packages_to_install | packages_to_upgrade
+
+ if len(packages_to_install_or_upgrade) == 0:
+ self.unchanged_pkgs.extend(self.packages)
+ self.message = 'Package{0} already upgraded: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages),
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package{0} would be upgraded: {1}'.format(
+ "s" if len(packages_to_install_or_upgrade) > 1 else "",
+ ", ".join(packages_to_install_or_upgrade)
+ )
+ raise HomebrewException(self.message)
+
+ for command, packages in [
+ ("install", packages_to_install),
+ ("upgrade", packages_to_upgrade)
+ ]:
+ if not packages:
+ continue
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + list(packages)
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ self.changed_pkgs.extend(packages_to_install_or_upgrade)
+ self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade)
+ self.changed = True
+ self.message = 'Package{0} upgraded: {1}'.format(
+ "s" if len(packages_to_install_or_upgrade) > 1 else "",
+ ", ".join(packages_to_install_or_upgrade),
+ )
# /upgraded ------------------------------ }}}
# uninstalled ---------------------------- {{{
- def _uninstall_current_package(self):
- if not HomebrewValidate.valid_package(self.current_package):
- self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
+ def _uninstall_packages(self):
+ packages_to_uninstall = self.installed_packages & set(self.packages)
- if not self._current_package_is_installed():
- self.unchanged_count += 1
- self.unchanged_pkgs.append(self.current_package)
- self.message = 'Package already uninstalled: {0}'.format(
- self.current_package,
+ if len(packages_to_uninstall) == 0:
+ self.unchanged_pkgs.extend(self.packages)
+ self.message = 'Package{0} already uninstalled: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages),
)
return True
if self.module.check_mode:
self.changed = True
- self.message = 'Package would be uninstalled: {0}'.format(
- self.current_package
+ self.message = 'Package{0} would be uninstalled: {1}'.format(
+ "s" if len(packages_to_uninstall) > 1 else "",
+ ", ".join(packages_to_uninstall)
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'uninstall', '--force']
+ self.install_options
- + [self.current_package]
+ + list(packages_to_uninstall)
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
- if not self._current_package_is_installed():
- self.changed_count += 1
- self.changed_pkgs.append(self.current_package)
+ if rc == 0:
+ self.changed_pkgs.extend(packages_to_uninstall)
+ self.unchanged_pkgs.extend(set(self.packages) - self.installed_packages)
self.changed = True
- self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ self.message = 'Package{0} uninstalled: {1}'.format(
+ "s" if len(packages_to_uninstall) > 1 else "",
+ ", ".join(packages_to_uninstall)
+ )
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
-
- def _uninstall_packages(self):
- for package in self.packages:
- self.current_package = package
- self._uninstall_current_package()
-
- return True
# /uninstalled ----------------------------- }}}
# linked --------------------------------- {{{
- def _link_current_package(self):
- if not HomebrewValidate.valid_package(self.current_package):
+ def _link_packages(self):
+ missing_packages = set(self.packages) - self.installed_packages
+ if missing_packages:
self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
-
- if not self._current_package_is_installed():
- self.failed = True
- self.message = 'Package not installed: {0}.'.format(self.current_package)
+ self.message = 'Package{0} not installed: {1}.'.format(
+ "s" if len(missing_packages) > 1 else "",
+ ", ".join(missing_packages),
+ )
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
- self.message = 'Package would be linked: {0}'.format(
- self.current_package
+ self.message = 'Package{0} would be linked: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'link']
+ self.install_options
- + [self.current_package]
+ + self.packages
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
- self.changed_count += 1
- self.changed_pkgs.append(self.current_package)
+ self.changed_pkgs.extend(self.packages)
self.changed = True
- self.message = 'Package linked: {0}'.format(self.current_package)
-
+ self.message = 'Package{0} linked: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
+ )
return True
else:
self.failed = True
- self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ self.message = 'Package{0} could not be linked: {1}.'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
+ )
raise HomebrewException(self.message)
-
- def _link_packages(self):
- for package in self.packages:
- self.current_package = package
- self._link_current_package()
-
- return True
# /linked -------------------------------- }}}
# unlinked ------------------------------- {{{
- def _unlink_current_package(self):
- if not HomebrewValidate.valid_package(self.current_package):
+ def _unlink_packages(self):
+ missing_packages = set(self.packages) - self.installed_packages
+ if missing_packages:
self.failed = True
- self.message = 'Invalid package: {0}.'.format(self.current_package)
- raise HomebrewException(self.message)
-
- if not self._current_package_is_installed():
- self.failed = True
- self.message = 'Package not installed: {0}.'.format(self.current_package)
+ self.message = 'Package{0} not installed: {1}.'.format(
+ "s" if len(missing_packages) > 1 else "",
+ ", ".join(missing_packages),
+ )
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
- self.message = 'Package would be unlinked: {0}'.format(
- self.current_package
+ self.message = 'Package{0} would be unlinked: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'unlink']
+ self.install_options
- + [self.current_package]
+ + self.packages
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
- self.changed_count += 1
- self.changed_pkgs.append(self.current_package)
+ self.changed_pkgs.extend(self.packages)
self.changed = True
- self.message = 'Package unlinked: {0}'.format(self.current_package)
-
+ self.message = 'Package{0} unlinked: {1}'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
+ )
return True
else:
self.failed = True
- self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ self.message = 'Package{0} could not be unlinked: {1}.'.format(
+ "s" if len(self.packages) > 1 else "",
+ ", ".join(self.packages)
+ )
raise HomebrewException(self.message)
-
- def _unlink_packages(self):
- for package in self.packages:
- self.current_package = package
- self._unlink_current_package()
-
- return True
# /unlinked ------------------------------ }}}
# /commands ---------------------------------------------------- }}}
@@ -813,13 +807,11 @@ def main():
argument_spec=dict(
name=dict(
aliases=["pkg", "package", "formula"],
- required=False,
type='list',
elements='str',
),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
- required=False,
type='path',
),
state=dict(
@@ -841,13 +833,11 @@ def main():
type='bool',
),
install_options=dict(
- default=None,
aliases=['options'],
type='list',
elements='str',
),
upgrade_options=dict(
- default=None,
type='list',
elements='str',
),
@@ -864,7 +854,7 @@ def main():
p = module.params
if p['name']:
- packages = p['name']
+ packages = [package_name.lower() for package_name in p['name']]
else:
packages = None
diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py
index 7455a61d69..948f5c1fd1 100644
--- a/plugins/modules/homebrew_cask.py
+++ b/plugins/modules/homebrew_cask.py
@@ -734,13 +734,11 @@ def main():
argument_spec=dict(
name=dict(
aliases=["pkg", "package", "cask"],
- required=False,
type='list',
elements='str',
),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin",
- required=False,
type='path',
),
state=dict(
@@ -753,7 +751,6 @@ def main():
),
sudo_password=dict(
type="str",
- required=False,
no_log=True,
),
update_homebrew=dict(
@@ -761,7 +758,6 @@ def main():
type='bool',
),
install_options=dict(
- default=None,
aliases=['options'],
type='list',
elements='str',
diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py
index f070ccccc7..f50472f90d 100644
--- a/plugins/modules/homebrew_tap.py
+++ b/plugins/modules/homebrew_tap.py
@@ -220,11 +220,10 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], type='list', required=True, elements='str'),
- url=dict(default=None, required=False),
+ url=dict(),
state=dict(default='present', choices=['present', 'absent']),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
- required=False,
type='path',
),
),
diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py
index f0b64be149..f93867d03b 100644
--- a/plugins/modules/homectl.py
+++ b/plugins/modules/homectl.py
@@ -65,7 +65,7 @@ options:
type: str
resize:
description:
- - When used with O(disksize) this will attempt to resize the home directory immediately.
+ - When used with O(disksize) this attempts to resize the home directory immediately.
default: false
type: bool
realname:
@@ -218,53 +218,54 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: A json dictionary returned from C(homectl inspect -j).
- returned: success
- type: dict
- sample: {
- "data": {
- "binding": {
- "e9ed2a5b0033427286b228e97c1e8343": {
- "fileSystemType": "btrfs",
- "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
- "gid": 60268,
- "imagePath": "/home/james.home",
- "luksCipher": "aes",
- "luksCipherMode": "xts-plain64",
- "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
- "luksVolumeKeySize": 32,
- "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
- "storage": "luks",
- "uid": 60268
- }
- },
+ description: Dictionary returned from C(homectl inspect -j).
+ returned: success
+ type: dict
+ sample:
+ {
+ "data": {
+ "binding": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "fileSystemType": "btrfs",
+ "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
+ "gid": 60268,
+ "imagePath": "/home/james.home",
+ "luksCipher": "aes",
+ "luksCipherMode": "xts-plain64",
+ "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
+ "luksVolumeKeySize": 32,
+ "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
+ "storage": "luks",
+ "uid": 60268
+ }
+ },
+ "diskSize": 3221225472,
+ "disposition": "regular",
+ "lastChangeUSec": 1641941238208691,
+ "lastPasswordChangeUSec": 1641941238208691,
+ "privileged": {
+ "hashedPassword": [
+ "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
+ ]
+ },
+ "signature": [
+ {
+ "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
+ "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
+ }
+ ],
+ "status": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "diskCeiling": 21845405696,
+ "diskFloor": 268435456,
"diskSize": 3221225472,
- "disposition": "regular",
- "lastChangeUSec": 1641941238208691,
- "lastPasswordChangeUSec": 1641941238208691,
- "privileged": {
- "hashedPassword": [
- "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
- ]
- },
- "signature": [
- {
- "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
- "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
- }
- ],
- "status": {
- "e9ed2a5b0033427286b228e97c1e8343": {
- "diskCeiling": 21845405696,
- "diskFloor": 268435456,
- "diskSize": 3221225472,
- "service": "io.systemd.Home",
- "signedLocally": true,
- "state": "inactive"
- }
- },
- "userName": "james",
- }
+ "service": "io.systemd.Home",
+ "signedLocally": true,
+ "state": "inactive"
+ }
+ },
+ "userName": "james"
+ }
}
"""
diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py
index c653643e33..2512fc2642 100644
--- a/plugins/modules/honeybadger_deployment.py
+++ b/plugins/modules/honeybadger_deployment.py
@@ -51,7 +51,7 @@ options:
default: "https://api.honeybadger.io/v1/deploys"
validate_certs:
description:
- - If V(false), SSL certificates for the target url will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
sites using self-signed certificates.
type: bool
default: true
@@ -67,7 +67,7 @@ EXAMPLES = r"""
repo: 'git@github.com:user/repo.git'
"""
-RETURN = """# """
+RETURN = """#"""
import traceback
@@ -88,9 +88,9 @@ def main():
token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
- repo=dict(required=False),
- revision=dict(required=False),
- url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ repo=dict(),
+ revision=dict(),
+ url=dict(default='https://api.honeybadger.io/v1/deploys'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True
diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py
index 60f3ecc958..f04aaaed20 100644
--- a/plugins/modules/hpilo_boot.py
+++ b/plugins/modules/hpilo_boot.py
@@ -14,8 +14,8 @@ module: hpilo_boot
author: Dag Wieers (@dagwieers)
short_description: Boot system using specific media through HP iLO interface
description:
- - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd),
- V(network), or V(usb).'
+ - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), V(network),
+ or V(usb).'
- This module requires the hpilo python module.
extends_documentation_fragment:
- community.general.attributes
@@ -55,19 +55,19 @@ options:
state:
description:
- The state of the boot media.
- - "V(no_boot): Do not boot from the device"
- - "V(boot_once): Boot from the device once and then notthereafter"
- - "V(boot_always): Boot from the device each time the server is rebooted"
- - "V(connect): Connect the virtual media device and set to boot_always"
- - "V(disconnect): Disconnects the virtual media device and set to no_boot"
- - "V(poweroff): Power off the server"
+ - 'V(no_boot): Do not boot from the device.'
+ - 'V(boot_once): Boot from the device once and then not thereafter.'
+ - 'V(boot_always): Boot from the device each time the server is rebooted.'
+ - 'V(connect): Connect the virtual media device and set to boot_always.'
+ - 'V(disconnect): Disconnects the virtual media device and set to no_boot.'
+ - 'V(poweroff): Power off the server.'
default: boot_once
type: str
choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"]
force:
description:
- Whether to force a reboot (even when the system is already booted).
- - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running.
default: false
type: bool
ssl_version:
@@ -76,6 +76,12 @@ options:
default: TLSv1
type: str
choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]
+ idempotent_boot_once:
+ description:
+ - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on.
+ type: bool
+ default: false
+ version_added: 10.6.0
requirements:
- python-hpilo
notes:
@@ -138,6 +144,7 @@ def main():
image=dict(type='str'),
state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
force=dict(type='bool', default=False),
+ idempotent_boot_once=dict(type='bool', default=False),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
)
)
@@ -152,6 +159,7 @@ def main():
image = module.params['image']
state = module.params['state']
force = module.params['force']
+ idempotent_boot_once = module.params['idempotent_boot_once']
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
@@ -187,13 +195,21 @@ def main():
power_status = ilo.get_host_power_status()
- if not force and power_status == 'ON':
- module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
-
if power_status == 'ON':
- ilo.warm_boot_server()
-# ilo.cold_boot_server()
- changed = True
+ if not force and not idempotent_boot_once:
+ # module.deprecate(
+ # 'The failure of the module when the server is already powered on is being deprecated.'
+ # ' Please set the parameter "idempotent_boot_once=true" to start using the new behavior.',
+ # version='11.0.0',
+ # collection_name='community.general'
+ # )
+ module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
+ elif not force and idempotent_boot_once:
+ pass
+ elif force:
+ ilo.warm_boot_server()
+ # ilo.cold_boot_server()
+ changed = True
else:
ilo.press_pwr_btn()
# ilo.reset_server()
diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py
index 70eecb8b0e..90680603e8 100644
--- a/plugins/modules/hpilo_info.py
+++ b/plugins/modules/hpilo_info.py
@@ -121,7 +121,7 @@ hw_uuid:
host_power_status:
description:
- Power status of host.
- - Will be one of V(ON), V(OFF) and V(UNKNOWN).
+ - It is one of V(ON), V(OFF) and V(UNKNOWN).
returned: always
type: str
sample: "ON"
diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py
index 654ba2c710..c2d32c7d89 100644
--- a/plugins/modules/hponcfg.py
+++ b/plugins/modules/hponcfg.py
@@ -97,7 +97,6 @@ class HPOnCfg(ModuleHelper):
verbose=cmd_runner_fmt.as_bool("-v"),
minfw=cmd_runner_fmt.as_opt_val("-m"),
)
- use_old_vardict = False
def __run__(self):
runner = CmdRunner(
diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py
index de94765130..d8a755476f 100644
--- a/plugins/modules/htpasswd.py
+++ b/plugins/modules/htpasswd.py
@@ -46,10 +46,13 @@ options:
description:
- Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib,
such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes.
- Only some schemes in addition to the four choices below will be compatible with Apache or Nginx, and supported schemes
- depend on passlib version and its dependencies.
+ Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes
+ depend on C(passlib) version and its dependencies.
- See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme).
- 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).'
+ - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does
+ not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing
+ entry and then create a new one using the new scheme.'
aliases: [crypt_scheme]
state:
type: str
@@ -63,8 +66,8 @@ options:
type: bool
default: true
description:
- - Used with O(state=present). If V(true), the file will be created if it does not exist. Conversely, if set to V(false)
- and the file does not exist it will fail.
+ - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and
+ the file does not exist, it fails.
notes:
- This module depends on the C(passlib) Python library, which needs to be installed on all target systems.
- 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).'
@@ -85,7 +88,7 @@ EXAMPLES = r"""
password: '9s36?;fyNp'
owner: root
group: www-data
- mode: 0640
+ mode: '0640'
- name: Remove a user from a password file
community.general.htpasswd:
@@ -188,9 +191,9 @@ def main():
arg_spec = dict(
path=dict(type='path', required=True, aliases=["dest", "destfile"]),
name=dict(type='str', required=True, aliases=["username"]),
- password=dict(type='str', required=False, default=None, no_log=True),
- hash_scheme=dict(type='str', required=False, default="apr_md5_crypt", aliases=["crypt_scheme"]),
- state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ password=dict(type='str', no_log=True),
+ hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]),
+ state=dict(type='str', default="present", choices=["present", "absent"]),
create=dict(type='bool', default=True),
)
@@ -238,8 +241,8 @@ def main():
(msg, changed) = present(path, username, password, hash_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
- module.exit_json(msg="%s not present" % username,
- warnings="%s does not exist" % path, changed=False)
+ module.warn("%s does not exist" % path)
+ module.exit_json(msg="%s not present" % username, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py
index f01b7c48fd..13becdf07f 100644
--- a/plugins/modules/hwc_ecs_instance.py
+++ b/plugins/modules/hwc_ecs_instance.py
@@ -442,7 +442,7 @@ created:
disk_config_type:
description:
- Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system
- disk will be expanded to be as same as the flavor.
+ disk is expanded to be as same as the flavor.
type: str
returned: success
host_name:
diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py
index 0763c07b01..0d57caf6cb 100644
--- a/plugins/modules/hwc_evs_disk.py
+++ b/plugins/modules/hwc_evs_disk.py
@@ -70,8 +70,8 @@ options:
- SSD specifies the ultra-high I/O disk type.
- SAS specifies the high I/O disk type.
- SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from
- a snapshot, the volume_type field must be the same as that of the snapshot's source disk.
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the snapshot's source disk.
type: str
required: true
backup_id:
@@ -92,9 +92,9 @@ options:
required: false
enable_scsi:
description:
- - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying
- storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will
- be VBD, which supports only simple SCSI read/write commands.
+ - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+ storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is
+ VBD, which supports only simple SCSI read/write commands.
- If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI
EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
type: bool
@@ -167,8 +167,8 @@ volume_type:
- SSD specifies the ultra-high I/O disk type.
- SAS specifies the high I/O disk type.
- SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from
- a snapshot, the volume_type field must be the same as that of the snapshot's source disk.
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot,
+ the volume_type field must be the same as that of the snapshot's source disk.
type: str
returned: success
backup_id:
@@ -189,8 +189,8 @@ enable_full_clone:
returned: success
enable_scsi:
description:
- - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying
- storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will be
+ - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+ storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is
VBD, which supports only simple SCSI read/write commands.
- If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS
disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py
index 5c0c2c8b61..d34e428d6a 100644
--- a/plugins/modules/hwc_network_vpc.py
+++ b/plugins/modules/hwc_network_vpc.py
@@ -86,22 +86,22 @@ EXAMPLES = r"""
RETURN = r"""
id:
description:
- - The id of vpc.
+ - The ID of VPC.
type: str
returned: success
name:
description:
- - The name of vpc.
+ - The name of VPC.
type: str
returned: success
cidr:
description:
- - The range of available subnets in the vpc.
+ - The range of available subnets in the VPC.
type: str
returned: success
status:
description:
- - The status of vpc.
+ - The status of VPC.
type: str
returned: success
routes:
@@ -117,12 +117,12 @@ routes:
returned: success
next_hop:
description:
- - The next hop of a route. If the route type is peering, it will provide VPC peering connection ID.
+ - The next hop of a route. If the route type is peering, it provides VPC peering connection ID.
type: str
returned: success
enable_shared_snat:
description:
- - Show whether the shared snat is enabled.
+ - Show whether the shared SNAT is enabled.
type: bool
returned: success
"""
diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py
index b818fe0d86..e830c2b14b 100644
--- a/plugins/modules/hwc_vpc_eip.py
+++ b/plugins/modules/hwc_vpc_eip.py
@@ -92,7 +92,7 @@ options:
required: false
ip_version:
description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
type: int
required: false
ipv4_address:
@@ -193,7 +193,7 @@ enterprise_project_id:
returned: success
ip_version:
description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
type: int
returned: success
ipv4_address:
diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py
index 695c644cb9..e665568774 100644
--- a/plugins/modules/hwc_vpc_private_ip.py
+++ b/plugins/modules/hwc_vpc_private_ip.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection.
- O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution
is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter support updating. If one of option is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
index 85224fd4c8..20bbba6cd8 100644
--- a/plugins/modules/hwc_vpc_route.py
+++ b/plugins/modules/hwc_vpc_route.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection.
- O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options
exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter supports updating. If one option is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
index 9f53b49c0d..e1b2b41ae4 100644
--- a/plugins/modules/hwc_vpc_security_group.py
+++ b/plugins/modules/hwc_vpc_security_group.py
@@ -22,7 +22,7 @@ notes:
selection.
- O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group
with this options exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter supports updating. If one option is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py
index 0848901cd5..42f854a029 100644
--- a/plugins/modules/hwc_vpc_security_group_rule.py
+++ b/plugins/modules/hwc_vpc_security_group_rule.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection.
- O(security_group_id) is used for security group rule selection. If more than one security group rule with this options
exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter supports updating. If one option is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py
index 84a9219370..b9af890688 100644
--- a/plugins/modules/hwc_vpc_subnet.py
+++ b/plugins/modules/hwc_vpc_subnet.py
@@ -86,7 +86,7 @@ options:
required: false
dns_address:
description:
- - Specifies the DNS server addresses for subnet. The address in the head will be used first.
+ - Specifies the DNS server addresses for subnet. The address in the head is used first.
type: list
elements: str
required: false
@@ -148,7 +148,7 @@ dhcp_enable:
returned: success
dns_address:
description:
- - Specifies the DNS server addresses for subnet. The address in the head will be used first.
+ - Specifies the DNS server addresses for subnet. The address in the head is used first.
type: list
returned: success
"""
diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py
index 00b9ee1239..d34474b551 100644
--- a/plugins/modules/ibm_sa_domain.py
+++ b/plugins/modules/ibm_sa_domain.py
@@ -36,7 +36,7 @@ options:
type: str
ldap_id:
description:
- - LDAP id to add to the domain.
+ - LDAP ID to add to the domain.
required: false
type: str
size:
diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py
index f6613b3b29..b3d80a6b62 100644
--- a/plugins/modules/ibm_sa_host.py
+++ b/plugins/modules/ibm_sa_host.py
@@ -41,8 +41,8 @@ options:
type: str
domain:
description:
- - The domains the cluster will be attached to. To include more than one domain, separate domain names with commas. To
- include all existing domains, use an asterisk (V(*)).
+ - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include
+ all existing domains, use an asterisk (V(*)).
required: false
type: str
iscsi_chap_name:
diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py
index 271a6387bc..d78f607aae 100644
--- a/plugins/modules/icinga2_host.py
+++ b/plugins/modules/icinga2_host.py
@@ -30,13 +30,13 @@ options:
- HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path).
use_proxy:
description:
- - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
url_username:
@@ -48,12 +48,12 @@ options:
type: str
description:
- The password for use in HTTP basic authentication.
- - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
+ - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used.
force_basic_auth:
description:
- - Httplib2, the library used by the uri module only sends authentication information when a webservice responds to an
- initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This
- option forces the sending of the Basic authentication header upon initial request.
+ - C(httplib2), the library used by Ansible's HTTP request code only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins
+ may fail. This option forces the sending of the Basic authentication header upon initial request.
type: bool
default: false
client_cert:
@@ -235,11 +235,11 @@ def main():
state=dict(default="present", choices=["absent", "present"]),
name=dict(required=True, aliases=['host']),
zone=dict(),
- template=dict(default=None),
+ template=dict(),
check_command=dict(default="hostalive"),
- display_name=dict(default=None),
+ display_name=dict(),
ip=dict(),
- variables=dict(type='dict', default=None),
+ variables=dict(type='dict'),
)
# Define the main module
diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py
index 531da53162..fa4f29e5f5 100644
--- a/plugins/modules/idrac_redfish_command.py
+++ b/plugins/modules/idrac_redfish_command.py
@@ -16,6 +16,7 @@ description:
- For use with Dell iDRAC operations that require Redfish OEM extensions.
extends_documentation_fragment:
- community.general.attributes
+ - community.general.redfish
attributes:
check_mode:
support: none
@@ -62,6 +63,12 @@ options:
- ID of the System, Manager or Chassis to modify.
type: str
version_added: '0.2.0'
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author: "Jose Delarosa (@jose-delarosa)"
"""
@@ -93,7 +100,7 @@ return_values:
import re
from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC
from ansible.module_utils.common.text.converters import to_native
@@ -147,17 +154,19 @@ CATEGORY_COMMANDS_ALL = {
def main():
result = {}
return_values = {}
+ argument_spec = dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True),
- timeout=dict(type='int', default=10),
- resource_id=dict()
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py
index 97d7a62d04..466e0b344c 100644
--- a/plugins/modules/idrac_redfish_config.py
+++ b/plugins/modules/idrac_redfish_config.py
@@ -16,6 +16,7 @@ description:
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute.
extends_documentation_fragment:
- community.general.attributes
+ - community.general.redfish
attributes:
check_mode:
support: none
@@ -71,6 +72,12 @@ options:
- ID of the System, Manager or Chassis to modify.
type: str
version_added: '0.2.0'
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author: "Jose Delarosa (@jose-delarosa)"
"""
@@ -154,7 +161,7 @@ from ansible.module_utils.common.validation import (
check_mutually_exclusive,
check_required_arguments
)
-from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC
from ansible.module_utils.common.text.converters import to_native
@@ -246,18 +253,20 @@ CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
def main():
result = {}
+ argument_spec = dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True),
- manager_attributes=dict(type='dict', default={}),
- timeout=dict(type='int', default=10),
- resource_id=dict()
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py
index 3a8ea8103f..4b9745f7c2 100644
--- a/plugins/modules/idrac_redfish_info.py
+++ b/plugins/modules/idrac_redfish_info.py
@@ -17,6 +17,7 @@ description:
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
+ - community.general.redfish
attributes:
check_mode:
version_added: 3.3.0
@@ -57,6 +58,12 @@ options:
- Timeout in seconds for HTTP requests to iDRAC.
default: 10
type: int
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author: "Jose Delarosa (@jose-delarosa)"
"""
@@ -124,7 +131,7 @@ msg:
"""
from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC
from ansible.module_utils.common.text.converters import to_native
@@ -177,16 +184,18 @@ CATEGORY_COMMANDS_ALL = {
def main():
result = {}
+ argument_spec = dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10)
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True),
- timeout=dict(type='int', default=10)
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/ilo_redfish_command.py b/plugins/modules/ilo_redfish_command.py
index 3e698fc049..52b08f8654 100644
--- a/plugins/modules/ilo_redfish_command.py
+++ b/plugins/modules/ilo_redfish_command.py
@@ -19,6 +19,7 @@ attributes:
support: none
extends_documentation_fragment:
- community.general.attributes
+ - community.general.redfish
options:
category:
required: true
@@ -58,6 +59,12 @@ options:
- Timeout in seconds for HTTP requests to iLO.
default: 60
type: int
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author:
- Varni H P (@varini-hp)
"""
@@ -96,22 +103,25 @@ CATEGORY_COMMANDS_ALL = {
}
from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
result = {}
+ argument_spec = dict(
+ category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ timeout=dict(type="int", default=60),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True)
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- timeout=dict(type="int", default=60),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True)
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py
index fdda339ab3..95c45590e1 100644
--- a/plugins/modules/ilo_redfish_config.py
+++ b/plugins/modules/ilo_redfish_config.py
@@ -15,6 +15,7 @@ description:
- For use with HPE iLO operations that require Redfish OEM extensions.
extends_documentation_fragment:
- community.general.attributes
+ - community.general.redfish
attributes:
check_mode:
support: none
@@ -65,6 +66,12 @@ options:
description:
- Value of the attribute to be configured.
type: str
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author:
- "Bhavya B (@bhavya06)"
"""
@@ -113,25 +120,28 @@ CATEGORY_COMMANDS_ALL = {
}
from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
result = {}
+ argument_spec = dict(
+ category=dict(required=True, choices=list(
+ CATEGORY_COMMANDS_ALL.keys())),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ attribute_name=dict(required=True),
+ attribute_value=dict(type='str'),
+ timeout=dict(type='int', default=10)
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True, choices=list(
- CATEGORY_COMMANDS_ALL.keys())),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True),
- attribute_name=dict(required=True),
- attribute_value=dict(type='str'),
- timeout=dict(type='int', default=10)
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py
index 3bd379e80a..daa43b004e 100644
--- a/plugins/modules/ilo_redfish_info.py
+++ b/plugins/modules/ilo_redfish_info.py
@@ -16,6 +16,7 @@ description:
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
+ - community.general.redfish
options:
category:
required: true
@@ -51,6 +52,12 @@ options:
- Timeout in seconds for HTTP requests to iLO.
default: 10
type: int
+ validate_certs:
+ version_added: 10.6.0
+ ca_path:
+ version_added: 10.6.0
+ ciphers:
+ version_added: 10.6.0
author:
- "Bhavya B (@bhavya06)"
"""
@@ -108,21 +115,24 @@ CATEGORY_COMMANDS_DEFAULT = {
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC
def main():
result = {}
category_list = []
+ argument_spec = dict(
+ category=dict(required=True, type='list', elements='str'),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10)
+ )
+ argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC)
module = AnsibleModule(
- argument_spec=dict(
- category=dict(required=True, type='list', elements='str'),
- command=dict(required=True, type='list', elements='str'),
- baseuri=dict(required=True),
- username=dict(),
- password=dict(no_log=True),
- auth_token=dict(no_log=True),
- timeout=dict(type='int', default=10)
- ),
+ argument_spec,
required_together=[
('username', 'password'),
],
diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py
index d9313b973c..674ba0d2b3 100644
--- a/plugins/modules/imc_rest.py
+++ b/plugins/modules/imc_rest.py
@@ -57,8 +57,8 @@ options:
description:
- When used instead of O(path), sets the content of the API requests directly.
- This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
- - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, the Cisco IMC
- output is subsequently merged.
+ - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output
+ is subsequently merged.
- Parameter O(content) is mutual exclusive with parameter O(path).
type: str
protocol:
@@ -71,12 +71,12 @@ options:
description:
- The socket level timeout in seconds.
- This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module
- will fail with a C(Connection failure) indicating that C(The read operation timed out).
+ fails with a C(Connection failure) indicating that C(The read operation timed out).
default: 60
type: int
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -84,8 +84,8 @@ notes:
- The XML fragments do not need an authentication cookie, this is injected by the module automatically.
- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
- Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous
- configuration. As a result, this module will always report a change on subsequent runs. In case this behaviour is fixed
- in a future update to Cisco IMC, this module will automatically adapt.
+ configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in
+ a future update to Cisco IMC, this module is meant to automatically adapt.
- If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some
XML fragments can take longer than the default timeout.
- More information about the IMC REST API is available from
@@ -263,7 +263,7 @@ output:
response="yes"
errorCode="ERR-xml-parse-error"
invocationResult="594"
- errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n" />
"""
import os
diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py
index 344bf9cc56..1c29e8a94b 100644
--- a/plugins/modules/imgadm.py
+++ b/plugins/modules/imgadm.py
@@ -44,7 +44,7 @@ options:
choices: [present, absent, deleted, imported, updated, vacuumed]
description:
- State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent).
- When set to V(vacuumed) and O(uuid=*), it will remove all unused images.
+ When set to V(vacuumed) and O(uuid=*), it removes all unused images.
type: str
type:
diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py
index 5a0f093247..cc54b46c51 100644
--- a/plugins/modules/infinity.py
+++ b/plugins/modules/infinity.py
@@ -67,7 +67,7 @@ options:
type: str
network_location:
description:
- - The parent network id for a given network.
+ - The parent network ID for a given network.
type: int
default: -1
network_type:
@@ -105,17 +105,18 @@ EXAMPLES = r"""
RETURN = r"""
network_id:
- description: Id for a given network.
+ description: ID for a given network.
returned: success
type: str
sample: '1501'
ip_info:
- description: When reserve next available ip address from a network, the ip address info ) is returned.
+ description: When reserving the next available IP address from a network, the IP address info is returned.
returned: success
type: str
sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
network_info:
- description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned.
+ description: When reserving a LAN network from an Infinity supernet by providing network_size, the information about the
+ reserved network is returned.
returned: success
type: str
sample: {
@@ -125,7 +126,7 @@ network_info:
"network_size": null,
"description": null,
"network_location": "3085",
- "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null},
"network_type": "lan",
"network_name": "'reserve_new_ansible_network'"
}
diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py
index cdd6bafb6d..824c34bb7d 100644
--- a/plugins/modules/influxdb_retention_policy.py
+++ b/plugins/modules/influxdb_retention_policy.py
@@ -59,8 +59,9 @@ options:
default: false
shard_group_duration:
description:
- - Determines the time range covered by a shard group. If specified it must be at least one hour. If none, it's determined
- by InfluxDB by the rentention policy's duration. Supports complex duration expressions with multiple units.
+ - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it
+ is determined by InfluxDB by the retention policy's duration. Supports complex duration expressions with multiple
+ units.
type: str
version_added: '2.0.0'
extends_documentation_fragment:
diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py
index bc66ff693d..45410e76a5 100644
--- a/plugins/modules/influxdb_user.py
+++ b/plugins/modules/influxdb_user.py
@@ -37,7 +37,7 @@ options:
admin:
description:
- Whether the user should be in the admin role or not.
- - Since version 2.8, the role will also be updated.
+ - Since version 2.8, the role is also updated.
default: false
type: bool
state:
@@ -50,8 +50,8 @@ options:
description:
- Privileges to grant to this user.
- Takes a list of dicts containing the "database" and "privilege" keys.
- - If this argument is not provided, the current grants will be left alone.
- - If an empty list is provided, all grants for the user will be removed.
+ - If this argument is not provided, the current grants are left alone.
+ - If an empty list is provided, all grants for the user are removed.
type: list
elements: dict
extends_documentation_fragment:
@@ -101,9 +101,7 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r"""
-#only defaults
-"""
+RETURN = r"""#"""
import json
@@ -219,7 +217,7 @@ def main():
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
- user_password=dict(required=False, type='str', no_log=True),
+ user_password=dict(type='str', no_log=True),
admin=dict(default='False', type='bool'),
grants=dict(type='list', elements='dict'),
)
diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py
index 61e6662d95..04fe92fa08 100644
--- a/plugins/modules/ini_file.py
+++ b/plugins/modules/ini_file.py
@@ -39,7 +39,7 @@ options:
section:
description:
- Section name in INI file. This is added if O(state=present) automatically when a single value is being set.
- - If being omitted, the O(option) will be placed before the first O(section).
+ - If being omitted, the O(option) is placed before the first O(section).
- Omitting O(section) is also required if the config format does not support sections.
type: str
section_has_values:
@@ -63,7 +63,7 @@ options:
elements: str
description:
- Among possibly multiple sections of the same name, select the first one that contains matching options and values.
- - With O(state=present), if a suitable section is not found, a new section will be added, including the required options.
+ - With O(state=present), if a suitable section is not found, a new section is added, including the required options.
- With O(state=absent), at most one O(section) is removed if it contains the values.
version_added: 8.6.0
option:
@@ -100,8 +100,8 @@ options:
O(option)s with the same name are not touched.
- If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other
O(option)s with the same name are not touched.
- - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be added and the other
- O(option)s with the same name are removed.
+ - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s
+ with the same name are removed.
type: str
choices: [absent, present]
default: present
@@ -126,8 +126,8 @@ options:
version_added: 7.5.0
create:
description:
- - If set to V(false), the module will fail if the file does not already exist.
- - By default it will create the file if it is missing.
+ - If set to V(false), the module fails if the file does not already exist.
+ - By default it creates the file if it is missing.
type: bool
default: true
allow_no_value:
@@ -268,21 +268,21 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text
def match_opt(option, line):
option = re.escape(option)
- return re.match('([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+ return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+ return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg):
option_changed = None
if ignore_spaces:
old_match = match_opt(option, section_lines[index])
- if not old_match.group(1):
+ if not old_match.group(2):
new_match = match_opt(option, newline)
- option_changed = old_match.group(7) != new_match.group(7)
+ option_changed = old_match.group(8) != new_match.group(8)
if option_changed is None:
option_changed = section_lines[index] != newline
if option_changed:
@@ -299,7 +299,7 @@ def check_section_has_values(section_has_values, section_lines):
for condition in section_has_values:
for line in section_lines:
match = match_opt(condition["option"], line)
- if match and (len(condition["values"]) == 0 or match.group(7) in condition["values"]):
+ if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]):
break
else:
return False
@@ -432,8 +432,8 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None,
for index, line in enumerate(section_lines):
if match_function(option, line):
match = match_function(option, line)
- if values and match.group(7) in values:
- matched_value = match.group(7)
+ if values and match.group(8) in values:
+ matched_value = match.group(8)
if not matched_value and allow_no_value:
# replace existing option with no value line(s)
newline = u'%s\n' % option
@@ -505,7 +505,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None,
section_lines = new_section_lines
elif not exclusive and len(values) > 0:
# delete specified option=value line(s)
- new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(7) in values)]
+ new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)]
if section_lines != new_section_lines:
changed = True
msg = 'option changed'
@@ -584,7 +584,7 @@ def main():
option=dict(type='str', required=True),
value=dict(type='str'),
values=dict(type='list', elements='str')
- ), default=None, mutually_exclusive=[['value', 'values']]),
+ ), mutually_exclusive=[['value', 'values']]),
option=dict(type='str'),
value=dict(type='str'),
values=dict(type='list', elements='str'),
diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py
index e54a56949f..da88a7e7c2 100644
--- a/plugins/modules/installp.py
+++ b/plugins/modules/installp.py
@@ -47,7 +47,7 @@ options:
choices: [absent, present]
default: present
notes:
- - If the package is already installed, even the package/fileset is new, the module will not install it.
+ - If the package is already installed, even the package/fileset is new, the module does not install it.
"""
EXAMPLES = r"""
diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py
index e878d10d1f..8e315d7b69 100644
--- a/plugins/modules/interfaces_file.py
+++ b/plugins/modules/interfaces_file.py
@@ -45,10 +45,10 @@ options:
value:
type: str
description:
- - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added. If O(option) already
- exists and is not V(pre-up), V(up), V(post-up) or V(down), it's value will be updated. V(pre-up), V(up), V(post-up)
- and V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option
- set are supported.
+ - If O(option) is not presented for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already
+ exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated. V(pre-up), V(up), V(post-up) and
+ V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option set
+ are supported.
backup:
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
@@ -58,12 +58,12 @@ options:
state:
type: str
description:
- - If set to V(absent) the option or section will be removed if present instead of created.
+ - If set to V(absent) the option or section is removed if present instead of created.
default: "present"
choices: ["present", "absent"]
notes:
- - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state.
+ - If option is defined multiple times, the last one is updated but all others are deleted in case of O(state=absent).
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
"""
diff --git a/plugins/modules/ip_netns.py b/plugins/modules/ip_netns.py
index 69534c810d..6bcae8e5f2 100644
--- a/plugins/modules/ip_netns.py
+++ b/plugins/modules/ip_netns.py
@@ -7,37 +7,36 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ip_netns
author: "Arie Bregman (@bregman-arie)"
short_description: Manage network namespaces
-requirements: [ ip ]
+requirements: [ip]
description:
- - Create or delete network namespaces using the ip command.
+ - Create or delete network namespaces using the C(ip) command.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- required: false
- description:
- - Name of the namespace
- type: str
- state:
- required: false
- default: "present"
- choices: [ present, absent ]
- description:
- - Whether the namespace should exist
- type: str
-'''
+ name:
+ required: false
+ description:
+ - Name of the namespace.
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [present, absent]
+ description:
+ - Whether the namespace should exist.
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a namespace named mario
community.general.ip_netns:
name: mario
@@ -47,11 +46,11 @@ EXAMPLES = '''
community.general.ip_netns:
name: luigi
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
# Default return values
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
diff --git a/plugins/modules/ipa_config.py b/plugins/modules/ipa_config.py
index 871643fd7b..ea08f8f8ba 100644
--- a/plugins/modules/ipa_config.py
+++ b/plugins/modules/ipa_config.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_config
author: Fran Fitzpatrick (@fxfitz)
short_description: Manage Global FreeIPA Configuration Settings
@@ -115,10 +114,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled
community.general.ipa_config:
ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"]
@@ -221,14 +219,14 @@ EXAMPLES = r'''
ipa_host: localhost
ipa_user: admin
ipa_pass: supersecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
config:
description: Configuration as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py
index 1dad138377..3cba35b11c 100644
--- a/plugins/modules/ipa_dnsrecord.py
+++ b/plugins/modules/ipa_dnsrecord.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_dnsrecord
author: Abhijeet Kasurde (@Akasurde)
short_description: Manage FreeIPA DNS records
@@ -23,64 +22,64 @@ attributes:
options:
zone_name:
description:
- - The DNS zone name to which DNS record needs to be managed.
+ - The DNS zone name in which the DNS record needs to be managed.
required: true
type: str
record_name:
description:
- - The DNS record name to manage.
+ - The DNS record name to manage.
required: true
aliases: ["name"]
type: str
record_type:
description:
- - The type of DNS record name.
- - Support for V(NS) was added in comunity.general 8.2.0.
- - Support for V(SSHFP) was added in community.general 9.1.0.
+ - The type of DNS record name.
+ - Support for V(NS) was added in community.general 8.2.0.
+ - Support for V(SSHFP) was added in community.general 9.1.0.
required: false
default: 'A'
choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP']
type: str
record_value:
description:
- - Manage DNS record name with this value.
- - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified.
- - Use O(record_values) if you need to specify multiple values.
- - In the case of V(A) or V(AAAA) record types, this will be the IP address.
- - In the case of V(A6) record type, this will be the A6 Record data.
- - In the case of V(CNAME) record type, this will be the hostname.
- - In the case of V(DNAME) record type, this will be the DNAME target.
- - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record.
- - In the case of V(PTR) record type, this will be the hostname.
- - In the case of V(TXT) record type, this will be a text.
- - In the case of V(SRV) record type, this will be a service record.
- - In the case of V(MX) record type, this will be a mail exchanger record.
- - In the case of V(SSHFP) record type, this will be an SSH fingerprint record.
+ - Manage DNS record name with this value.
+ - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified.
+ - Use O(record_values) if you need to specify multiple values.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
type: str
record_values:
description:
- - Manage DNS record name with this value.
- - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified.
- - In the case of V(A) or V(AAAA) record types, this will be the IP address.
- - In the case of V(A6) record type, this will be the A6 Record data.
- - In the case of V(CNAME) record type, this will be the hostname.
- - In the case of V(DNAME) record type, this will be the DNAME target.
- - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record.
- - In the case of V(PTR) record type, this will be the hostname.
- - In the case of V(TXT) record type, this will be a text.
- - In the case of V(SRV) record type, this will be a service record.
- - In the case of V(MX) record type, this will be a mail exchanger record.
- - In the case of V(SSHFP) record type, this will be an SSH fingerprint record.
+ - Manage DNS record name with this value.
+ - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
type: list
elements: str
record_ttl:
description:
- - Set the TTL for the record.
- - Applies only when adding a new or changing the value of O(record_value) or O(record_values).
+ - Set the TTL for the record.
+ - Applies only when adding a new or changing the value of O(record_value) or O(record_values).
required: false
type: int
state:
- description: State to ensure
+ description: State to ensure.
required: false
default: present
choices: ["absent", "present"]
@@ -88,10 +87,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure dns record is present
community.general.ipa_dnsrecord:
ipa_host: spider.example.com
@@ -167,7 +165,7 @@ EXAMPLES = r'''
state: absent
- name: Ensure an NS record for a subdomain is present
- community,general.ipa_dnsrecord:
+ community.general.ipa_dnsrecord:
name: subdomain
zone_name: example.com
record_type: 'NS'
@@ -189,14 +187,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: ChangeMe!
-'''
+"""
-RETURN = r'''
+RETURN = r"""
dnsrecord:
description: DNS record as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
@@ -355,7 +353,7 @@ def main():
record_value=dict(type='str'),
record_values=dict(type='list', elements='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
- record_ttl=dict(type='int', required=False),
+ record_ttl=dict(type='int'),
)
module = AnsibleModule(
diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py
index 6699b0525b..81a99bc54b 100644
--- a/plugins/modules/ipa_dnszone.py
+++ b/plugins/modules/ipa_dnszone.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_dnszone
author: Fran Fitzpatrick (@fxfitz)
short_description: Manage FreeIPA DNS Zones
description:
- - Add and delete an IPA DNS Zones using IPA API
+ - Add and delete an IPA DNS Zones using IPA API.
attributes:
check_mode:
support: full
@@ -23,11 +22,11 @@ attributes:
options:
zone_name:
description:
- - The DNS zone name to which needs to be managed.
+ - The DNS zone name that needs to be managed.
required: true
type: str
state:
- description: State to ensure
+ description: State to ensure.
required: false
default: present
choices: ["absent", "present"]
@@ -44,10 +43,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure dns zone is present
community.general.ipa_dnszone:
ipa_host: spider.example.com
@@ -78,14 +76,14 @@ EXAMPLES = r'''
state: present
zone_name: example.com
allowsyncptr: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
zone:
description: DNS zone as returned by IPA API.
returned: always
type: dict
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
@@ -175,8 +173,8 @@ def main():
argument_spec = ipa_argument_spec()
argument_spec.update(zone_name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
- dynamicupdate=dict(type='bool', required=False, default=False),
- allowsyncptr=dict(type='bool', required=False, default=False),
+ dynamicupdate=dict(type='bool', default=False),
+ allowsyncptr=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py
index 3d4f81d5b1..7c533fb729 100644
--- a/plugins/modules/ipa_getkeytab.py
+++ b/plugins/modules/ipa_getkeytab.py
@@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_getkeytab
short_description: Manage keytab file in FreeIPA
version_added: 9.5.0
@@ -68,21 +67,22 @@ options:
retrieve_mode:
description:
- Retrieve an existing key from the server instead of generating a new one.
- - This is incompatible with the O(password), and will work only against a IPA server more recent than version 3.3.
+ - This is incompatible with the O(password), and works only against an IPA server more recent than version 3.3.
- The user requesting the keytab must have access to the keys for this operation to succeed.
- - Be aware that if set V(true), a new keytab will be generated.
+ - Be aware that if set to V(true), a new keytab is generated.
- This invalidates all previously retrieved keytabs for this service principal.
type: bool
encryption_types:
description:
- The list of encryption types to use to generate keys.
- - It will use local client defaults if not provided.
+ - It uses local client defaults if not provided.
- Valid values depend on the Kerberos library version and configuration.
type: str
state:
description:
- The state of the keytab file.
- - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set O(force=true).
+ - V(present) only checks for existence of a file; if you want to recreate keytab with other parameters you should set
+ O(force=true).
type: str
default: present
choices: ["present", "absent"]
@@ -95,14 +95,12 @@ requirements:
- Managed host is FreeIPA client
extends_documentation_fragment:
- community.general.attributes
-'''
+"""
-EXAMPLES = r'''
-- name: Get kerberos ticket
- ansible.builtin.shell: kinit admin
- args:
- stdin: "{{ aldpro_admin_password }}"
- changed_when: true
+EXAMPLES = r"""
+- name: Get Kerberos ticket using default principal
+ community.general.krb_ticket:
+ password: "{{ aldpro_admin_password }}"
- name: Create keytab
community.general.ipa_getkeytab:
@@ -123,7 +121,7 @@ EXAMPLES = r'''
principal: HTTP/freeipa-dc02.ipa.test
ipa_host: freeipa-dc01.ipa.test
force: true
-'''
+"""
import os
diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py
index 92470606fc..934e533dff 100644
--- a/plugins/modules/ipa_group.py
+++ b/plugins/modules/ipa_group.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_group
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA group
description:
- - Add, modify and delete group within IPA server
+ - Add, modify and delete group within IPA server.
attributes:
check_mode:
support: full
@@ -22,77 +21,76 @@ attributes:
options:
append:
description:
- - If V(true), add the listed O(user) and O(group) to the group members.
- - If V(false), only the listed O(user) and O(group) will be group members, removing any other members.
+ - If V(true), add the listed O(user) and O(group) to the group members.
+ - If V(false), only the listed O(user) and O(group) are set as group members, removing any other members.
default: false
type: bool
version_added: 4.0.0
cn:
description:
- - Canonical name.
- - Can not be changed as it is the unique identifier.
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
type: str
description:
description:
- - Description of the group.
+ - Description of the group.
type: str
external:
description:
- - Allow adding external non-IPA members from trusted domains.
+ - Allow adding external non-IPA members from trusted domains.
type: bool
gidnumber:
description:
- - GID (use this option to set it manually).
+ - GID (use this option to set it manually).
aliases: ['gid']
type: str
group:
description:
- - List of group names assigned to this group.
- - If O(append=false) and an empty list is passed all groups will be removed from this group.
- - Groups that are already assigned but not passed will be removed.
- - If O(append=true) the listed groups will be assigned without removing other groups.
- - If option is omitted assigned groups will not be checked or changed.
+ - List of group names assigned to this group.
+ - If O(append=false) and an empty list is passed all groups are removed from this group.
+ - Groups that are already assigned but not passed are removed.
+ - If O(append=true) the listed groups are assigned without removing other groups.
+ - If option is omitted assigned groups are not checked or changed.
type: list
elements: str
nonposix:
description:
- - Create as a non-POSIX group.
+ - Create as a non-POSIX group.
type: bool
user:
description:
- - List of user names assigned to this group.
- - If O(append=false) and an empty list is passed all users will be removed from this group.
- - Users that are already assigned but not passed will be removed.
- - If O(append=true) the listed users will be assigned without removing other users.
- - If option is omitted assigned users will not be checked or changed.
+ - List of user names assigned to this group.
+ - If O(append=false) and an empty list is passed all users are removed from this group.
+ - Users that are already assigned but not passed are removed.
+ - If O(append=true) the listed users are assigned without removing other users.
+ - If option is omitted assigned users are not checked or changed.
type: list
elements: str
external_user:
description:
- - List of external users assigned to this group.
- - Behaves identically to O(user) with respect to O(append) attribute.
- - List entries can be in V(DOMAIN\\\\username) or SID format.
- - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users.
- This is because only SIDs are returned by IPA query.
- - O(external=true) is needed for this option to work.
+ - List of external users assigned to this group.
+ - Behaves identically to O(user) with respect to O(append) attribute.
+ - List entries can be in V(DOMAIN\\\\username) or SID format.
+ - Unless SIDs are provided, the module always attempts to make changes even if the group already has all the users.
+ This is because only SIDs are returned by IPA query.
+ - O(external=true) is needed for this option to work.
type: list
elements: str
version_added: 6.3.0
state:
description:
- - State to ensure
+ - State to ensure.
default: "present"
choices: ["absent", "present"]
type: str
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure group is present
community.general.ipa_group:
name: oinstall
@@ -106,8 +104,8 @@ EXAMPLES = r'''
community.general.ipa_group:
name: ops
group:
- - sysops
- - appops
+ - sysops
+ - appops
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -116,8 +114,8 @@ EXAMPLES = r'''
community.general.ipa_group:
name: sysops
user:
- - linus
- - larry
+ - linus
+ - larry
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -126,7 +124,7 @@ EXAMPLES = r'''
community.general.ipa_group:
name: developers
user:
- - john
+ - john
append: true
state: present
ipa_host: ipa.example.com
@@ -135,25 +133,25 @@ EXAMPLES = r'''
- name: Add external user to a group
community.general.ipa_group:
- name: developers
- external: true
- append: true
- external_user:
- - S-1-5-21-123-1234-12345-63421
- ipa_host: ipa.example.com
- ipa_user: admin
- ipa_pass: topsecret
+ name: developers
+ external: true
+ append: true
+ external_user:
+ - S-1-5-21-123-1234-12345-63421
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
- name: Add a user from MYDOMAIN
community.general.ipa_group:
- name: developers
- external: true
- append: true
- external_user:
- - MYDOMAIN\\john
- ipa_host: ipa.example.com
- ipa_user: admin
- ipa_pass: topsecret
+ name: developers
+ external: true
+ append: true
+ external_user:
+ - MYDOMAIN\\john
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
- name: Ensure group is absent
community.general.ipa_group:
@@ -162,14 +160,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
group:
- description: Group as returned by IPA API
+ description: Group as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
@@ -263,7 +261,7 @@ def ensure(module, client):
nonposix=module.params['nonposix'])
ipa_group = client.group_find(name=name)
- if (not (external or external_user is None)):
+ if not (external or external_user is None):
module.fail_json("external_user can only be set if external = True")
changed = False
diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py
index 77a4d0d487..cb828f68e9 100644
--- a/plugins/modules/ipa_hbacrule.py
+++ b/plugins/modules/ipa_hbacrule.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_hbacrule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA HBAC rule
@@ -22,99 +21,98 @@ attributes:
options:
cn:
description:
- - Canonical name.
- - Can not be changed as it is the unique identifier.
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
description:
- description: Description
+ description: Description.
type: str
host:
description:
- - List of host names to assign.
- - If an empty list is passed all hosts will be removed from the rule.
- - If option is omitted hosts will not be checked or changed.
+ - List of host names to assign.
+ - If an empty list is passed all hosts are removed from the rule.
+ - If option is omitted hosts are not checked or changed.
required: false
type: list
elements: str
hostcategory:
- description: Host category
+ description: Host category.
choices: ['all']
type: str
hostgroup:
description:
- - List of hostgroup names to assign.
- - If an empty list is passed all hostgroups will be removed. from the rule
- - If option is omitted hostgroups will not be checked or changed.
+ - List of hostgroup names to assign.
+ - If an empty list is passed all hostgroups are removed from the rule.
+ - If option is omitted hostgroups are not checked or changed.
type: list
elements: str
service:
description:
- - List of service names to assign.
- - If an empty list is passed all services will be removed from the rule.
- - If option is omitted services will not be checked or changed.
+ - List of service names to assign.
+ - If an empty list is passed all services are removed from the rule.
+ - If option is omitted services are not checked or changed.
type: list
elements: str
servicecategory:
- description: Service category
+ description: Service category.
choices: ['all']
type: str
servicegroup:
description:
- - List of service group names to assign.
- - If an empty list is passed all assigned service groups will be removed from the rule.
- - If option is omitted service groups will not be checked or changed.
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups are removed from the rule.
+ - If option is omitted service groups are not checked or changed.
type: list
elements: str
sourcehost:
description:
- - List of source host names to assign.
- - If an empty list if passed all assigned source hosts will be removed from the rule.
- - If option is omitted source hosts will not be checked or changed.
+ - List of source host names to assign.
+ - If an empty list is passed all assigned source hosts are removed from the rule.
+ - If option is omitted source hosts are not checked or changed.
type: list
elements: str
sourcehostcategory:
- description: Source host category
+ description: Source host category.
choices: ['all']
type: str
sourcehostgroup:
description:
- - List of source host group names to assign.
- - If an empty list if passed all assigned source host groups will be removed from the rule.
- - If option is omitted source host groups will not be checked or changed.
+ - List of source host group names to assign.
+ - If an empty list is passed all assigned source host groups are removed from the rule.
+ - If option is omitted source host groups are not checked or changed.
type: list
elements: str
state:
- description: State to ensure
+ description: State to ensure.
default: "present"
- choices: ["absent", "disabled", "enabled","present"]
+ choices: ["absent", "disabled", "enabled", "present"]
type: str
user:
description:
- - List of user names to assign.
- - If an empty list if passed all assigned users will be removed from the rule.
- - If option is omitted users will not be checked or changed.
+ - List of user names to assign.
+ - If an empty list is passed all assigned users are removed from the rule.
+ - If option is omitted users are not checked or changed.
type: list
elements: str
usercategory:
- description: User category
+ description: User category.
choices: ['all']
type: str
usergroup:
description:
- - List of user group names to assign.
- - If an empty list if passed all assigned user groups will be removed from the rule.
- - If option is omitted user groups will not be checked or changed.
+ - List of user group names to assign.
+ - If an empty list is passed all assigned user groups are removed from the rule.
+ - If option is omitted user groups are not checked or changed.
type: list
elements: str
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure rule to allow all users to access any host from any host
community.general.ipa_hbacrule:
name: allow_all
@@ -132,9 +130,9 @@ EXAMPLES = r'''
name: allow_all_developers_access_to_db
description: Allow all developers to access any database from any host
hostgroup:
- - db-server
+ - db-server
usergroup:
- - developers
+ - developers
state: present
ipa_host: ipa.example.com
ipa_user: admin
@@ -147,14 +145,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
hbacrule:
description: HBAC rule as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py
index a30f6b1ea2..c88f3c0adb 100644
--- a/plugins/modules/ipa_host.py
+++ b/plugins/modules/ipa_host.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_host
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA host
@@ -22,73 +21,73 @@ attributes:
options:
fqdn:
description:
- - Full qualified domain name.
- - Can not be changed as it is the unique identifier.
+ - Full qualified domain name.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
description:
description:
- - A description of this host.
+ - A description of this host.
type: str
force:
description:
- - Force host name even if not in DNS.
+ - Force host name even if not in DNS.
required: false
type: bool
ip_address:
description:
- - Add the host to DNS with this IP address.
+ - Add the host to DNS with this IP address.
type: str
mac_address:
description:
- - List of Hardware MAC address(es) off this host.
- - If option is omitted MAC addresses will not be checked or changed.
- - If an empty list is passed all assigned MAC addresses will be removed.
- - MAC addresses that are already assigned but not passed will be removed.
+ - List of Hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses are not checked nor changed.
+ - If an empty list is passed all assigned MAC addresses are removed.
+ - MAC addresses that are already assigned but not passed are removed.
aliases: ["macaddress"]
type: list
elements: str
ns_host_location:
description:
- - Host location (e.g. "Lab 2")
+ - Host location (for example V(Lab 2)).
aliases: ["nshostlocation"]
type: str
ns_hardware_platform:
description:
- - Host hardware platform (e.g. "Lenovo T61")
+ - Host hardware platform (for example V(Lenovo T61)).
aliases: ["nshardwareplatform"]
type: str
ns_os_version:
description:
- - Host operating system and version (e.g. "Fedora 9")
+ - Host operating system and version (for example V(Fedora 9)).
aliases: ["nsosversion"]
type: str
user_certificate:
description:
- - List of Base-64 encoded server certificates.
- - If option is omitted certificates will not be checked or changed.
- - If an empty list is passed all assigned certificates will be removed.
- - Certificates already assigned but not passed will be removed.
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates are not checked nor changed.
+ - If an empty list is passed all assigned certificates are removed.
+ - Certificates already assigned but not passed are removed.
aliases: ["usercertificate"]
type: list
elements: str
state:
description:
- - State to ensure.
+ - State to ensure.
default: present
choices: ["absent", "disabled", "enabled", "present"]
type: str
force_creation:
description:
- - Create host if O(state=disabled) or O(state=enabled) but not present.
+ - Create host if O(state=disabled) or O(state=enabled) but not present.
default: true
type: bool
version_added: 9.5.0
update_dns:
description:
- - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS.
- - This option has no effect for states other than "absent".
+ - If set to V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS.
+ - This option has no effect for states other than V(absent).
type: bool
random_password:
description: Generate a random password to be used in bulk enrollment.
@@ -96,10 +95,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure host is present
community.general.ipa_host:
name: host01.example.com
@@ -109,8 +107,8 @@ EXAMPLES = r'''
ns_os_version: CentOS 7
ns_hardware_platform: Lenovo T61
mac_address:
- - "08:00:27:E3:B1:2D"
- - "52:54:00:BD:97:1E"
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
state: present
ipa_host: ipa.example.com
ipa_user: admin
@@ -159,18 +157,18 @@ EXAMPLES = r'''
ipa_user: admin
ipa_pass: topsecret
update_dns: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
host:
description: Host as returned by IPA API.
returned: always
type: dict
host_diff:
- description: List of options that differ and would be changed
+ description: List of options that differ and would be changed.
returned: if check mode and a difference is found
type: list
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py
index 9e6abf32aa..ffe87fca4c 100644
--- a/plugins/modules/ipa_hostgroup.py
+++ b/plugins/modules/ipa_hostgroup.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_hostgroup
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA host-group
@@ -22,61 +21,61 @@ attributes:
options:
append:
description:
- - If V(true), add the listed O(host) to the O(hostgroup).
- - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts.
+ - If V(true), add the listed O(host) to the O(hostgroup).
+ - If V(false), only the listed O(host) is set in O(hostgroup), removing any other hosts.
default: false
type: bool
version_added: 6.6.0
cn:
description:
- - Name of host-group.
- - Can not be changed as it is the unique identifier.
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
description:
description:
- - Description.
+ - Description.
type: str
host:
description:
- - List of hosts that belong to the host-group.
- - If an empty list is passed all hosts will be removed from the group.
- - If option is omitted hosts will not be checked or changed.
- - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts are removed from the group.
+ - If option is omitted hosts are not checked nor changed.
+ - If option is passed all assigned hosts that are not passed are unassigned from the group.
type: list
elements: str
hostgroup:
description:
- - List of host-groups than belong to that host-group.
- - If an empty list is passed all host-groups will be removed from the group.
- - If option is omitted host-groups will not be checked or changed.
- - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+      - List of host-groups that belong to that host-group.
+ - If an empty list is passed all host-groups are removed from the group.
+ - If option is omitted host-groups are not checked nor changed.
+ - If option is passed all assigned hostgroups that are not passed are unassigned from the group.
type: list
elements: str
state:
description:
- - State to ensure.
- - V("absent") and V("disabled") give the same results.
- - V("present") and V("enabled") give the same results.
+ - State to ensure.
+ - V("absent") and V("disabled") give the same results.
+ - V("present") and V("enabled") give the same results.
default: "present"
choices: ["absent", "disabled", "enabled", "present"]
type: str
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure host-group databases is present
community.general.ipa_hostgroup:
name: databases
state: present
host:
- - db.example.com
+ - db.example.com
hostgroup:
- - mysql-server
- - oracle-server
+ - mysql-server
+ - oracle-server
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -88,14 +87,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
hostgroup:
description: Hostgroup as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py
index e2d8f0cd52..3c07c7eda3 100644
--- a/plugins/modules/ipa_otpconfig.py
+++ b/plugins/modules/ipa_otpconfig.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_otpconfig
author: justchris1 (@justchris1)
short_description: Manage FreeIPA OTP Configuration Settings
@@ -41,10 +40,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure the TOTP authentication window is set to 300 seconds
community.general.ipa_otpconfig:
ipatokentotpauthwindow: '300'
@@ -72,14 +70,14 @@ EXAMPLES = r'''
ipa_host: localhost
ipa_user: admin
ipa_pass: supersecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
otpconfig:
description: OTP configuration as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py
index d8a5b3cf1d..e8c99bd302 100644
--- a/plugins/modules/ipa_otptoken.py
+++ b/plugins/modules/ipa_otptoken.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_otptoken
author: justchris1 (@justchris1)
short_description: Manage FreeIPA OTPs
@@ -27,25 +26,25 @@ options:
aliases: ["name"]
type: str
newuniqueid:
- description: If specified, the unique id specified will be changed to this.
+    description: If specified, the unique ID is changed to this.
type: str
otptype:
description:
- - Type of OTP.
- - "B(Note:) Cannot be modified after OTP is created."
+ - Type of OTP.
+ - B(Note:) Cannot be modified after OTP is created.
type: str
- choices: [ totp, hotp ]
+ choices: [totp, hotp]
secretkey:
description:
- - Token secret (Base64).
- - If OTP is created and this is not specified, a random secret will be generated by IPA.
- - "B(Note:) Cannot be modified after OTP is created."
+ - Token secret (Base64).
+ - If OTP is created and this is not specified, a random secret is generated by IPA.
+ - B(Note:) Cannot be modified after OTP is created.
type: str
description:
description: Description of the token (informational only).
type: str
owner:
- description: Assigned user of the token.
+ description: Assigned user of the token.
type: str
enabled:
description: Mark the token as enabled (default V(true)).
@@ -53,15 +52,15 @@ options:
type: bool
notbefore:
description:
- - First date/time the token can be used.
- - In the format C(YYYYMMddHHmmss).
- - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22.
+ - First date/time the token can be used.
+ - In the format C(YYYYMMddHHmmss).
+ - For example, V(20180121182022) allows the token to be used starting on 21 January 2018 at 18:20:22.
type: str
notafter:
description:
- - Last date/time the token can be used.
- - In the format C(YYYYMMddHHmmss).
- - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22.
+ - Last date/time the token can be used.
+ - In the format C(YYYYMMddHHmmss).
+ - For example, V(20200121182022) allows the token to be used until 21 January 2020 at 18:20:22.
type: str
vendor:
description: Token vendor name (informational only).
@@ -79,37 +78,37 @@ options:
type: str
algorithm:
description:
- - Token hash algorithm.
- - "B(Note:) Cannot be modified after OTP is created."
+ - Token hash algorithm.
+ - B(Note:) Cannot be modified after OTP is created.
choices: ['sha1', 'sha256', 'sha384', 'sha512']
type: str
digits:
description:
- - Number of digits each token code will have.
- - "B(Note:) Cannot be modified after OTP is created."
- choices: [ 6, 8 ]
+ - Number of digits each token code has.
+ - B(Note:) Cannot be modified after OTP is created.
+ choices: [6, 8]
type: int
offset:
description:
- - TOTP token / IPA server time difference.
- - "B(Note:) Cannot be modified after OTP is created."
+ - TOTP token / IPA server time difference.
+ - B(Note:) Cannot be modified after OTP is created.
type: int
interval:
description:
- - Length of TOTP token code validity in seconds.
- - "B(Note:) Cannot be modified after OTP is created."
+ - Length of TOTP token code validity in seconds.
+ - B(Note:) Cannot be modified after OTP is created.
type: int
counter:
description:
- - Initial counter for the HOTP token.
- - "B(Note:) Cannot be modified after OTP is created."
+ - Initial counter for the HOTP token.
+ - B(Note:) Cannot be modified after OTP is created.
type: int
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a totp for pinky, allowing the IPA server to generate using defaults
community.general.ipa_otptoken:
uniqueid: Token123
@@ -161,14 +160,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
otptoken:
- description: OTP Token as returned by IPA API
+ description: OTP Token as returned by IPA API.
returned: always
type: dict
-'''
+"""
import base64
import traceback
diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py
index ba7d702916..5b41651e09 100644
--- a/plugins/modules/ipa_pwpolicy.py
+++ b/plugins/modules/ipa_pwpolicy.py
@@ -7,152 +7,153 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_pwpolicy
author: Adralioh (@adralioh)
short_description: Manage FreeIPA password policies
description:
-- Add, modify, or delete a password policy using the IPA API.
+ - Add, modify, or delete a password policy using the IPA API.
version_added: 2.0.0
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- group:
- description:
- - Name of the group that the policy applies to.
- - If omitted, the global policy is used.
- aliases: ["name"]
- type: str
- state:
- description: State to ensure.
- default: "present"
- choices: ["absent", "present"]
- type: str
- maxpwdlife:
- description: Maximum password lifetime (in days).
- type: str
- minpwdlife:
- description: Minimum password lifetime (in hours).
- type: str
- historylength:
- description:
- - Number of previous passwords that are remembered.
- - Users cannot reuse remembered passwords.
- type: str
- minclasses:
- description: Minimum number of character classes.
- type: str
- minlength:
- description: Minimum password length.
- type: str
- priority:
- description:
- - Priority of the policy.
- - High number means lower priority.
- - Required when C(cn) is not the global policy.
- type: str
- maxfailcount:
- description: Maximum number of consecutive failures before lockout.
- type: str
- failinterval:
- description: Period (in seconds) after which the number of failed login attempts is reset.
- type: str
- lockouttime:
- description: Period (in seconds) for which users are locked out.
- type: str
- gracelimit:
- description: Maximum number of LDAP logins after password expiration.
- type: int
- version_added: 8.2.0
- maxrepeat:
- description: Maximum number of allowed same consecutive characters in the new password.
- type: int
- version_added: 8.2.0
- maxsequence:
- description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of length 5 is V(12345).
- type: int
- version_added: 8.2.0
- dictcheck:
- description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib).
- type: bool
- version_added: 8.2.0
- usercheck:
- description: Check whether the password (with possible modifications) contains the user name in some form (if the name has > 3 characters).
- type: bool
- version_added: 8.2.0
+ group:
+ description:
+ - Name of the group that the policy applies to.
+ - If omitted, the global policy is used.
+ aliases: ["name"]
+ type: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ maxpwdlife:
+ description: Maximum password lifetime (in days).
+ type: str
+ minpwdlife:
+ description: Minimum password lifetime (in hours).
+ type: str
+ historylength:
+ description:
+ - Number of previous passwords that are remembered.
+ - Users cannot reuse remembered passwords.
+ type: str
+ minclasses:
+ description: Minimum number of character classes.
+ type: str
+ minlength:
+ description: Minimum password length.
+ type: str
+ priority:
+ description:
+ - Priority of the policy.
+ - High number means lower priority.
+ - Required when C(cn) is not the global policy.
+ type: str
+ maxfailcount:
+ description: Maximum number of consecutive failures before lockout.
+ type: str
+ failinterval:
+ description: Period (in seconds) after which the number of failed login attempts is reset.
+ type: str
+ lockouttime:
+ description: Period (in seconds) for which users are locked out.
+ type: str
+ gracelimit:
+ description: Maximum number of LDAP logins after password expiration.
+ type: int
+ version_added: 8.2.0
+ maxrepeat:
+ description: Maximum number of allowed same consecutive characters in the new password.
+ type: int
+ version_added: 8.2.0
+ maxsequence:
+ description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of
+ length 5 is V(12345).
+ type: int
+ version_added: 8.2.0
+ dictcheck:
+ description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib).
+ type: bool
+ version_added: 8.2.0
+ usercheck:
+ description: Check whether the password (with possible modifications) contains the user name in some form (if the name
+ has > 3 characters).
+ type: bool
+ version_added: 8.2.0
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Modify the global password policy
community.general.ipa_pwpolicy:
- maxpwdlife: '90'
- minpwdlife: '1'
- historylength: '8'
- minclasses: '3'
- minlength: '16'
- maxfailcount: '6'
- failinterval: '60'
- lockouttime: '600'
- ipa_host: ipa.example.com
- ipa_user: admin
- ipa_pass: topsecret
+ maxpwdlife: '90'
+ minpwdlife: '1'
+ historylength: '8'
+ minclasses: '3'
+ minlength: '16'
+ maxfailcount: '6'
+ failinterval: '60'
+ lockouttime: '600'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
- name: Ensure the password policy for the group admins is present
community.general.ipa_pwpolicy:
- group: admins
- state: present
- maxpwdlife: '60'
- minpwdlife: '24'
- historylength: '16'
- minclasses: '4'
- priority: '10'
- minlength: '6'
- maxfailcount: '4'
- failinterval: '600'
- lockouttime: '1200'
- gracelimit: 3
- maxrepeat: 3
- maxsequence: 3
- dictcheck: true
- usercheck: true
- ipa_host: ipa.example.com
- ipa_user: admin
- ipa_pass: topsecret
+ group: admins
+ state: present
+ maxpwdlife: '60'
+ minpwdlife: '24'
+ historylength: '16'
+ minclasses: '4'
+ priority: '10'
+ minlength: '6'
+ maxfailcount: '4'
+ failinterval: '600'
+ lockouttime: '1200'
+ gracelimit: 3
+ maxrepeat: 3
+ maxsequence: 3
+ dictcheck: true
+ usercheck: true
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
- name: Ensure that the group sysops does not have a unique password policy
community.general.ipa_pwpolicy:
- group: sysops
- state: absent
- ipa_host: ipa.example.com
- ipa_user: admin
- ipa_pass: topsecret
-'''
+ group: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+"""
-RETURN = r'''
+RETURN = r"""
pwpolicy:
- description: Password policy as returned by IPA API.
- returned: always
- type: dict
- sample:
- cn: ['admins']
- cospriority: ['10']
- dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com'
- krbmaxpwdlife: ['60']
- krbminpwdlife: ['24']
- krbpwdfailurecountinterval: ['600']
- krbpwdhistorylength: ['16']
- krbpwdlockoutduration: ['1200']
- krbpwdmaxfailure: ['4']
- krbpwdmindiffchars: ['4']
- objectclass: ['top', 'nscontainer', 'krbpwdpolicy']
-'''
+ description: Password policy as returned by IPA API.
+ returned: always
+ type: dict
+ sample:
+ cn: ['admins']
+ cospriority: ['10']
+ dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com'
+ krbmaxpwdlife: ['60']
+ krbminpwdlife: ['24']
+ krbpwdfailurecountinterval: ['600']
+ krbpwdhistorylength: ['16']
+ krbpwdlockoutduration: ['1200']
+ krbpwdmaxfailure: ['4']
+ krbpwdmindiffchars: ['4']
+ objectclass: ['top', 'nscontainer', 'krbpwdpolicy']
+"""
import traceback
diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py
index fce315b662..6057deec7b 100644
--- a/plugins/modules/ipa_role.py
+++ b/plugins/modules/ipa_role.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_role
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA role
description:
-- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+ - Add, modify and delete a role within FreeIPA server using FreeIPA API.
attributes:
check_mode:
support: full
@@ -22,53 +21,53 @@ attributes:
options:
cn:
description:
- - Role name.
- - Can not be changed as it is the unique identifier.
+ - Role name.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
type: str
description:
description:
- - A description of this role-group.
+ - A description of this role-group.
type: str
group:
description:
- - List of group names assign to this role.
- - If an empty list is passed all assigned groups will be unassigned from the role.
- - If option is omitted groups will not be checked or changed.
- - If option is passed all assigned groups that are not passed will be unassigned from the role.
+      - List of group names assigned to this role.
+ - If an empty list is passed all assigned groups are unassigned from the role.
+ - If option is omitted groups are not checked nor changed.
+ - If option is passed all assigned groups that are not passed are unassigned from the role.
type: list
elements: str
host:
description:
- - List of host names to assign.
- - If an empty list is passed all assigned hosts will be unassigned from the role.
- - If option is omitted hosts will not be checked or changed.
- - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts are unassigned from the role.
+ - If option is omitted hosts are not checked nor changed.
+ - If option is passed all assigned hosts that are not passed are unassigned from the role.
type: list
elements: str
hostgroup:
description:
- - List of host group names to assign.
- - If an empty list is passed all assigned host groups will be removed from the role.
- - If option is omitted host groups will not be checked or changed.
- - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups are removed from the role.
+ - If option is omitted host groups are not checked nor changed.
+ - If option is passed all assigned hostgroups that are not passed are unassigned from the role.
type: list
elements: str
privilege:
description:
- - List of privileges granted to the role.
- - If an empty list is passed all assigned privileges will be removed.
- - If option is omitted privileges will not be checked or changed.
- - If option is passed all assigned privileges that are not passed will be removed.
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges are removed.
+ - If option is omitted privileges are not checked nor changed.
+ - If option is passed all assigned privileges that are not passed are removed.
type: list
elements: str
service:
description:
- - List of service names to assign.
- - If an empty list is passed all assigned services will be removed from the role.
- - If option is omitted services will not be checked or changed.
- - If option is passed all assigned services that are not passed will be removed from the role.
+ - List of service names to assign.
+ - If an empty list is passed all assigned services are removed from the role.
+ - If option is omitted services are not checked nor changed.
+ - If option is passed all assigned services that are not passed are removed from the role.
type: list
elements: str
state:
@@ -78,26 +77,25 @@ options:
type: str
user:
description:
- - List of user names to assign.
- - If an empty list is passed all assigned users will be removed from the role.
- - If option is omitted users will not be checked or changed.
+ - List of user names to assign.
+ - If an empty list is passed all assigned users are removed from the role.
+ - If option is omitted users are not checked nor changed.
type: list
elements: str
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure role is present
community.general.ipa_role:
name: dba
description: Database Administrators
state: present
user:
- - pinky
- - brain
+ - pinky
+ - brain
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -107,16 +105,16 @@ EXAMPLES = r'''
name: another-role
description: Just another role
group:
- - editors
+ - editors
host:
- - host01.example.com
+ - host01.example.com
hostgroup:
- - hostgroup01
+ - hostgroup01
privilege:
- - Group Administrators
- - User Administrators
+ - Group Administrators
+ - User Administrators
service:
- - service01
+ - service01
- name: Ensure role is absent
community.general.ipa_role:
@@ -125,14 +123,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
role:
description: Role as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py
index d9541674f2..51ace78760 100644
--- a/plugins/modules/ipa_service.py
+++ b/plugins/modules/ipa_service.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_service
author: Cédric Parent (@cprh)
short_description: Manage FreeIPA service
description:
-- Add and delete an IPA service using IPA API.
+ - Add and delete an IPA service using IPA API.
attributes:
check_mode:
support: full
@@ -22,26 +21,26 @@ attributes:
options:
krbcanonicalname:
description:
- - Principal of the service.
- - Can not be changed as it is the unique identifier.
+ - Principal of the service.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
hosts:
description:
- - Defines the list of 'ManagedBy' hosts.
+ - Defines the list of C(ManagedBy) hosts.
required: false
type: list
elements: str
force:
description:
- - Force principal name even if host is not in DNS.
+ - Force principal name even if host is not in DNS.
required: false
type: bool
skip_host_check:
description:
- - Force service to be created even when host object does not exist to manage it.
- - This is only used on creation, not for updating existing services.
+ - Force service to be created even when host object does not exist to manage it.
+ - This is only used on creation, not for updating existing services.
required: false
type: bool
default: false
@@ -55,10 +54,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure service is present
community.general.ipa_service:
name: http/host01.example.com
@@ -79,19 +77,19 @@ EXAMPLES = r'''
community.general.ipa_service:
name: http/host01.example.com
hosts:
- - host01.example.com
- - host02.example.com
+ - host01.example.com
+ - host02.example.com
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
service:
description: Service as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
@@ -199,10 +197,10 @@ def main():
argument_spec = ipa_argument_spec()
argument_spec.update(
krbcanonicalname=dict(type='str', required=True, aliases=['name']),
- force=dict(type='bool', required=False),
- skip_host_check=dict(type='bool', default=False, required=False),
- hosts=dict(type='list', required=False, elements='str'),
- state=dict(type='str', required=False, default='present',
+ force=dict(type='bool'),
+ skip_host_check=dict(type='bool', default=False),
+ hosts=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
choices=['present', 'absent']))
module = AnsibleModule(argument_spec=argument_spec,
diff --git a/plugins/modules/ipa_subca.py b/plugins/modules/ipa_subca.py
index 882b1ac396..ddb551689d 100644
--- a/plugins/modules/ipa_subca.py
+++ b/plugins/modules/ipa_subca.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_subca
author: Abhijeet Kasurde (@Akasurde)
short_description: Manage FreeIPA Lightweight Sub Certificate Authorities
description:
-- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API.
+  - Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authority using IPA API.
attributes:
check_mode:
support: full
@@ -22,23 +21,23 @@ attributes:
options:
subca_name:
description:
- - The Sub Certificate Authority name which needs to be managed.
+ - The Sub Certificate Authority name which needs to be managed.
required: true
aliases: ["name"]
type: str
subca_subject:
description:
- - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'.
+ - The Sub Certificate Authority's Subject, for example V(CN=SampleSubCA1,O=testrelm.test).
required: true
type: str
subca_desc:
description:
- - The Sub Certificate Authority's description.
+ - The Sub Certificate Authority's description.
type: str
state:
description:
- - State to ensure.
- - State 'disable' and 'enable' is available for FreeIPA 4.4.2 version and onwards.
+ - State to ensure.
+      - States V(disable) and V(enable) are available from FreeIPA version 4.4.2 onwards.
required: false
default: present
choices: ["absent", "disabled", "enabled", "present"]
@@ -46,10 +45,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure IPA Sub CA is present
community.general.ipa_subca:
ipa_host: spider.example.com
@@ -72,14 +70,14 @@ EXAMPLES = '''
ipa_pass: Passw0rd!
state: disable
subca_name: AnsibleSubCA1
-'''
+"""
-RETURN = r'''
+RETURN = r"""
subca:
description: IPA Sub CA record as returned by IPA API.
returned: always
type: dict
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
diff --git a/plugins/modules/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py
index d3139ba1c3..f52d3e9e6d 100644
--- a/plugins/modules/ipa_sudocmd.py
+++ b/plugins/modules/ipa_sudocmd.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_sudocmd
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command
description:
-- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+ - Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
attributes:
check_mode:
support: full
@@ -22,13 +21,13 @@ attributes:
options:
sudocmd:
description:
- - Sudo command.
+ - Sudo command.
aliases: ['name']
required: true
type: str
description:
description:
- - A description of this command.
+ - A description of this command.
type: str
state:
description: State to ensure.
@@ -38,10 +37,9 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure sudo command exists
community.general.ipa_sudocmd:
name: su
@@ -57,14 +55,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
sudocmd:
- description: Sudo command as return from IPA API
+  description: Sudo command as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py
index a768e74a1a..4298032121 100644
--- a/plugins/modules/ipa_sudocmdgroup.py
+++ b/plugins/modules/ipa_sudocmdgroup.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_sudocmdgroup
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command group
description:
-- Add, modify or delete sudo command group within IPA server using IPA API.
+ - Add, modify or delete sudo command group within IPA server using IPA API.
attributes:
check_mode:
support: full
@@ -22,13 +21,13 @@ attributes:
options:
cn:
description:
- - Sudo Command Group.
+ - Sudo Command Group.
aliases: ['name']
required: true
type: str
description:
description:
- - Group description.
+ - Group description.
type: str
state:
description: State to ensure.
@@ -37,24 +36,23 @@ options:
type: str
sudocmd:
description:
- - List of sudo commands to assign to the group.
- - If an empty list is passed all assigned commands will be removed from the group.
- - If option is omitted sudo commands will not be checked or changed.
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands are removed from the group.
+ - If option is omitted sudo commands are not checked nor changed.
type: list
elements: str
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure sudo command group exists
community.general.ipa_sudocmdgroup:
name: group01
description: Group of important commands
sudocmd:
- - su
+ - su
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -66,14 +64,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
sudocmdgroup:
- description: Sudo command group as returned by IPA API
+ description: Sudo command group as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py
index 223f6b6de7..ae3730da62 100644
--- a/plugins/modules/ipa_sudorule.py
+++ b/plugins/modules/ipa_sudorule.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_sudorule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo rule
description:
-- Add, modify or delete sudo rule within IPA server using IPA API.
+ - Add, modify or delete sudo rule within IPA server using IPA API.
attributes:
check_mode:
support: full
@@ -22,83 +21,83 @@ attributes:
options:
cn:
description:
- - Canonical name.
- - Can not be changed as it is the unique identifier.
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
type: str
cmdcategory:
description:
- - Command category the rule applies to.
+ - Command category the rule applies to.
choices: ['all']
type: str
cmd:
description:
- - List of commands assigned to the rule.
- - If an empty list is passed all commands will be removed from the rule.
- - If option is omitted commands will not be checked or changed.
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands are removed from the rule.
+ - If option is omitted commands are not checked nor changed.
type: list
elements: str
cmdgroup:
description:
- - List of command groups assigned to the rule.
- - If an empty list is passed all command groups will be removed from the rule.
- - If option is omitted command groups will not be checked or changed.
+ - List of command groups assigned to the rule.
+ - If an empty list is passed all command groups are removed from the rule.
+ - If option is omitted command groups are not checked nor changed.
type: list
elements: str
version_added: 2.0.0
deny_cmd:
description:
- - List of denied commands assigned to the rule.
- - If an empty list is passed all commands will be removed from the rule.
- - If option is omitted commands will not be checked or changed.
+ - List of denied commands assigned to the rule.
+ - If an empty list is passed all commands are removed from the rule.
+ - If option is omitted commands are not checked nor changed.
type: list
elements: str
version_added: 8.1.0
deny_cmdgroup:
description:
- - List of denied command groups assigned to the rule.
- - If an empty list is passed all command groups will be removed from the rule.
- - If option is omitted command groups will not be checked or changed.
+ - List of denied command groups assigned to the rule.
+ - If an empty list is passed all command groups are removed from the rule.
+ - If option is omitted command groups are not checked nor changed.
type: list
elements: str
version_added: 8.1.0
description:
description:
- - Description of the sudo rule.
+ - Description of the sudo rule.
type: str
host:
description:
- - List of hosts assigned to the rule.
- - If an empty list is passed all hosts will be removed from the rule.
- - If option is omitted hosts will not be checked or changed.
- - Option O(hostcategory) must be omitted to assign hosts.
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts are removed from the rule.
+ - If option is omitted hosts are not checked nor changed.
+ - Option O(hostcategory) must be omitted to assign hosts.
type: list
elements: str
hostcategory:
description:
- - Host category the rule applies to.
- - If V(all) is passed one must omit O(host) and O(hostgroup).
- - Option O(host) and O(hostgroup) must be omitted to assign V(all).
+ - Host category the rule applies to.
+ - If V(all) is passed one must omit O(host) and O(hostgroup).
+ - Option O(host) and O(hostgroup) must be omitted to assign V(all).
choices: ['all']
type: str
hostgroup:
description:
- - List of host groups assigned to the rule.
- - If an empty list is passed all host groups will be removed from the rule.
- - If option is omitted host groups will not be checked or changed.
- - Option O(hostcategory) must be omitted to assign host groups.
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups are removed from the rule.
+ - If option is omitted host groups are not checked nor changed.
+ - Option O(hostcategory) must be omitted to assign host groups.
type: list
elements: str
runasextusers:
description:
- - List of external RunAs users
+ - List of external RunAs users.
type: list
elements: str
version_added: 2.3.0
runasusercategory:
description:
- - RunAs User category the rule applies to.
+ - RunAs User category the rule applies to.
choices: ['all']
type: str
runasgroupcategory:
@@ -113,21 +112,21 @@ options:
elements: str
user:
description:
- - List of users assigned to the rule.
- - If an empty list is passed all users will be removed from the rule.
- - If option is omitted users will not be checked or changed.
+ - List of users assigned to the rule.
+ - If an empty list is passed all users are removed from the rule.
+ - If option is omitted users are not checked nor changed.
type: list
elements: str
usercategory:
description:
- - User category the rule applies to.
+ - User category the rule applies to.
choices: ['all']
type: str
usergroup:
description:
- - List of user groups assigned to the rule.
- - If an empty list is passed all user groups will be removed from the rule.
- - If option is omitted user groups will not be checked or changed.
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups are removed from the rule.
+ - If option is omitted user groups are not checked nor changed.
type: list
elements: str
state:
@@ -138,18 +137,18 @@ options:
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
-- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked for a password.
+EXAMPLES = r"""
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked
+ for a password.
community.general.ipa_sudorule:
name: sudo_all_nopasswd
cmdcategory: all
description: Allow to run every command with sudo without password
hostcategory: all
sudoopt:
- - '!authenticate'
+ - '!authenticate'
usercategory: all
ipa_host: ipa.example.com
ipa_user: admin
@@ -161,13 +160,13 @@ EXAMPLES = r'''
description: Allow developers to run every command with sudo on all database server
cmdcategory: all
host:
- - db01.example.com
+ - db01.example.com
hostgroup:
- - db-server
+ - db-server
sudoopt:
- - '!authenticate'
+ - '!authenticate'
usergroup:
- - developers
+ - developers
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -177,25 +176,25 @@ EXAMPLES = r'''
name: sudo_operations_all
description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root.
cmdgroup:
- - operations-cmdgroup
+ - operations-cmdgroup
hostcategory: all
runasextusers:
- - root
+ - root
sudoopt:
- - '!authenticate'
+ - '!authenticate'
usergroup:
- - operators
+ - operators
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
sudorule:
- description: Sudorule as returned by IPA
+ description: Sudorule as returned by IPA.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py
index e8a1858d0b..6e61f89600 100644
--- a/plugins/modules/ipa_user.py
+++ b/plugins/modules/ipa_user.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_user
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA users
description:
-- Add, modify and delete user within IPA server.
+ - Add, modify and delete user within IPA server.
attributes:
check_mode:
support: full
@@ -25,46 +24,46 @@ options:
type: str
update_password:
description:
- - Set password for a user.
+ - Set password for a user.
type: str
default: 'always'
- choices: [ always, on_create ]
+ choices: [always, on_create]
givenname:
description:
- - First name.
- - If user does not exist and O(state=present), the usage of O(givenname) is required.
+ - First name.
+ - If user does not exist and O(state=present), the usage of O(givenname) is required.
type: str
krbpasswordexpiration:
description:
- - Date at which the user password will expire.
- - In the format YYYYMMddHHmmss.
- - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22.
+ - Date at which the user password expires.
+ - In the format YYYYMMddHHmmss.
+ - For example V(20180121182022) expires on 21 January 2018 at 18:20:22.
type: str
loginshell:
description: Login shell.
type: str
mail:
description:
- - List of mail addresses assigned to the user.
- - If an empty list is passed all assigned email addresses will be deleted.
- - If None is passed email addresses will not be checked or changed.
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses are deleted.
+ - If None is passed email addresses are not checked nor changed.
type: list
elements: str
password:
description:
- - Password for a user.
- - Will not be set for an existing user unless O(update_password=always), which is the default.
+ - Password for a user.
+ - It is not set for an existing user unless O(update_password=always), which is the default.
type: str
sn:
description:
- - Surname.
- - If user does not exist and O(state=present), the usage of O(sn) is required.
+ - Surname.
+ - If user does not exist and O(state=present), the usage of O(sn) is required.
type: str
sshpubkey:
description:
- - List of public SSH key.
- - If an empty list is passed all assigned public keys will be deleted.
- - If None is passed SSH public keys will not be checked or changed.
+ - List of public SSH keys.
+ - If an empty list is passed all assigned public keys are deleted.
+ - If None is passed SSH public keys are not checked nor changed.
type: list
elements: str
state:
@@ -74,37 +73,37 @@ options:
type: str
telephonenumber:
description:
- - List of telephone numbers assigned to the user.
- - If an empty list is passed all assigned telephone numbers will be deleted.
- - If None is passed telephone numbers will not be checked or changed.
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers are deleted.
+ - If None is passed telephone numbers are not checked nor changed.
type: list
elements: str
title:
description: Title.
type: str
uid:
- description: uid of the user.
+ description: UID of the user.
required: true
aliases: ["name"]
type: str
uidnumber:
description:
- - Account Settings UID/Posix User ID number.
+ - Account Settings UID/Posix User ID number.
type: str
gidnumber:
description:
- - Posix Group ID.
+ - Posix Group ID.
type: str
homedirectory:
description:
- - Default home directory of the user.
+ - Default home directory of the user.
type: str
version_added: '0.2.0'
userauthtype:
description:
- - The authentication type to use for the user.
- - To remove all authentication types from the user, use an empty list V([]).
- - The choice V(idp) and V(passkey) has been added in community.general 8.1.0.
+ - The authentication type to use for the user.
+ - To remove all authentication types from the user, use an empty list V([]).
+ - The choices V(idp) and V(passkey) have been added in community.general 8.1.0.
choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"]
type: list
elements: str
@@ -114,11 +113,11 @@ extends_documentation_fragment:
- community.general.attributes
requirements:
-- base64
-- hashlib
-'''
+ - base64
+ - hashlib
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure pinky is present and always reset password
community.general.ipa_user:
name: pinky
@@ -127,12 +126,12 @@ EXAMPLES = r'''
givenname: Pinky
sn: Acme
mail:
- - pinky@acme.com
+ - pinky@acme.com
telephonenumber:
- - '+555123456'
+ - '+555123456'
sshpubkey:
- - ssh-rsa ....
- - ssh-dsa ....
+ - ssh-rsa ....
+ - ssh-dsa ....
uidnumber: '1001'
gidnumber: '100'
homedirectory: /home/pinky
@@ -170,14 +169,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
user:
- description: User as returned by IPA API
+ description: User as returned by IPA API.
returned: always
type: dict
-'''
+"""
import base64
import hashlib
@@ -269,7 +268,7 @@ def get_user_diff(client, ipa_user, module_user):
if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
hash_algo = 'sha256'
module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
- # Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on
+ # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
sshpubkey = module_user['ipasshpubkey']
del module_user['ipasshpubkey']
diff --git a/plugins/modules/ipa_vault.py b/plugins/modules/ipa_vault.py
index 88947e470e..23002b7ce0 100644
--- a/plugins/modules/ipa_vault.py
+++ b/plugins/modules/ipa_vault.py
@@ -7,84 +7,82 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipa_vault
author: Juan Manuel Parrilla (@jparrill)
short_description: Manage FreeIPA vaults
description:
-- Add, modify and delete vaults and secret vaults.
-- KRA service should be enabled to use this module.
+ - Add, modify and delete vaults and secret vaults.
+ - KRA service should be enabled to use this module.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- cn:
- description:
- - Vault name.
- - Can not be changed as it is the unique identifier.
- required: true
- aliases: ["name"]
- type: str
+ cn:
description:
- description:
- - Description.
- type: str
- ipavaulttype:
- description:
- - Vault types are based on security level.
- default: "symmetric"
- choices: ["asymmetric", "standard", "symmetric"]
- aliases: ["vault_type"]
- type: str
- ipavaultpublickey:
- description:
- - Public key.
- aliases: ["vault_public_key"]
- type: str
- ipavaultsalt:
- description:
- - Vault Salt.
- aliases: ["vault_salt"]
- type: str
- username:
- description:
- - Any user can own one or more user vaults.
- - Mutually exclusive with service.
- aliases: ["user"]
- type: list
- elements: str
- service:
- description:
- - Any service can own one or more service vaults.
- - Mutually exclusive with user.
- type: str
- state:
- description:
- - State to ensure.
- default: "present"
- choices: ["absent", "present"]
- type: str
- replace:
- description:
- - Force replace the existent vault on IPA server.
- type: bool
- default: false
- choices: ["True", "False"]
- validate_certs:
- description:
- - Validate IPA server certificates.
- type: bool
- default: true
+ - Vault name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with O(service).
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with O(user).
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+ - Force replace the existent vault on IPA server.
+ type: bool
+ default: false
+ choices: ["True", "False"]
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure vault is present
community.general.ipa_vault:
name: vault01
@@ -128,14 +126,14 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
-'''
+"""
-RETURN = r'''
+RETURN = r"""
vault:
- description: Vault as returned by IPA API
+ description: Vault as returned by IPA API.
returned: always
type: dict
-'''
+"""
import traceback
diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py
index c6a5511b73..7a2dde13d6 100644
--- a/plugins/modules/ipbase_info.py
+++ b/plugins/modules/ipbase_info.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: "ipbase_info"
version_added: "7.0.0"
short_description: "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API"
description:
- - "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API"
+ - Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API.
author: "Dominik Kukacka (@dominikkukacka)"
extends_documentation_fragment:
- "community.general.attributes"
@@ -22,31 +21,31 @@ extends_documentation_fragment:
options:
ip:
description:
- - "The IP you want to get the info for. If not specified the API will detect the IP automatically."
+ - The IP you want to get the info for. If not specified the API detects the IP automatically.
required: false
type: str
apikey:
description:
- - "The API key for the request if you need more requests."
+ - The API key for the request if you need more requests.
required: false
type: str
hostname:
description:
- - "If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP."
+ - If the O(hostname) parameter is set to V(true), the API response contains the hostname of the IP.
required: false
type: bool
default: false
language:
description:
- - "An ISO Alpha 2 Language Code for localizing the IP data"
+ - An ISO Alpha 2 Language Code for localizing the IP data.
required: false
type: str
default: "en"
notes:
- - "Check U(https://ipbase.com/) for more information."
-'''
+ - Check U(https://ipbase.com/) for more information.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: "Get IP geolocation information of the primary outgoing IP"
community.general.ipbase_info:
register: my_ip_info
@@ -64,156 +63,157 @@ EXAMPLES = '''
hostname: true
language: "de"
register: my_ip_info
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
data:
- description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure of the response."
+ description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure
+ of the response."
returned: success
type: dict
- sample: {
- "ip": "1.1.1.1",
- "hostname": "one.one.one.one",
- "type": "v4",
- "range_type": {
- "type": "PUBLIC",
- "description": "Public address"
- },
- "connection": {
- "asn": 13335,
- "organization": "Cloudflare, Inc.",
- "isp": "APNIC Research and Development",
- "range": "1.1.1.1/32"
- },
- "location": {
- "geonames_id": 5332870,
- "latitude": 34.053611755371094,
- "longitude": -118.24549865722656,
- "zip": "90012",
- "continent": {
- "code": "NA",
- "name": "North America",
- "name_translated": "North America"
+ sample:
+ {
+ "ip": "1.1.1.1",
+ "hostname": "one.one.one.one",
+ "type": "v4",
+ "range_type": {
+ "type": "PUBLIC",
+ "description": "Public address"
},
- "country": {
- "alpha2": "US",
- "alpha3": "USA",
- "calling_codes": [
- "+1"
- ],
- "currencies": [
- {
- "symbol": "$",
- "name": "US Dollar",
- "symbol_native": "$",
- "decimal_digits": 2,
- "rounding": 0,
- "code": "USD",
- "name_plural": "US dollars"
- }
- ],
- "emoji": "...",
- "ioc": "USA",
- "languages": [
- {
- "name": "English",
- "name_native": "English"
- }
- ],
- "name": "United States",
- "name_translated": "United States",
- "timezones": [
- "America/New_York",
- "America/Detroit",
- "America/Kentucky/Louisville",
- "America/Kentucky/Monticello",
- "America/Indiana/Indianapolis",
- "America/Indiana/Vincennes",
- "America/Indiana/Winamac",
- "America/Indiana/Marengo",
- "America/Indiana/Petersburg",
- "America/Indiana/Vevay",
- "America/Chicago",
- "America/Indiana/Tell_City",
- "America/Indiana/Knox",
- "America/Menominee",
- "America/North_Dakota/Center",
- "America/North_Dakota/New_Salem",
- "America/North_Dakota/Beulah",
- "America/Denver",
- "America/Boise",
- "America/Phoenix",
- "America/Los_Angeles",
- "America/Anchorage",
- "America/Juneau",
- "America/Sitka",
- "America/Metlakatla",
- "America/Yakutat",
- "America/Nome",
- "America/Adak",
- "Pacific/Honolulu"
- ],
- "is_in_european_union": false,
- "fips": "US",
- "geonames_id": 6252001,
- "hasc_id": "US",
- "wikidata_id": "Q30"
+ "connection": {
+ "asn": 13335,
+ "organization": "Cloudflare, Inc.",
+ "isp": "APNIC Research and Development",
+ "range": "1.1.1.1/32"
},
- "city": {
- "fips": "644000",
- "alpha2": null,
- "geonames_id": 5368753,
- "hasc_id": null,
- "wikidata_id": "Q65",
- "name": "Los Angeles",
- "name_translated": "Los Angeles"
+ "location": {
+ "geonames_id": 5332870,
+ "latitude": 34.053611755371094,
+ "longitude": -118.24549865722656,
+ "zip": "90012",
+ "continent": {
+ "code": "NA",
+ "name": "North America",
+ "name_translated": "North America"
+ },
+ "country": {
+ "alpha2": "US",
+ "alpha3": "USA",
+ "calling_codes": [
+ "+1"
+ ],
+ "currencies": [
+ {
+ "symbol": "$",
+ "name": "US Dollar",
+ "symbol_native": "$",
+ "decimal_digits": 2,
+ "rounding": 0,
+ "code": "USD",
+ "name_plural": "US dollars"
+ }
+ ],
+ "emoji": "...",
+ "ioc": "USA",
+ "languages": [
+ {
+ "name": "English",
+ "name_native": "English"
+ }
+ ],
+ "name": "United States",
+ "name_translated": "United States",
+ "timezones": [
+ "America/New_York",
+ "America/Detroit",
+ "America/Kentucky/Louisville",
+ "America/Kentucky/Monticello",
+ "America/Indiana/Indianapolis",
+ "America/Indiana/Vincennes",
+ "America/Indiana/Winamac",
+ "America/Indiana/Marengo",
+ "America/Indiana/Petersburg",
+ "America/Indiana/Vevay",
+ "America/Chicago",
+ "America/Indiana/Tell_City",
+ "America/Indiana/Knox",
+ "America/Menominee",
+ "America/North_Dakota/Center",
+ "America/North_Dakota/New_Salem",
+ "America/North_Dakota/Beulah",
+ "America/Denver",
+ "America/Boise",
+ "America/Phoenix",
+ "America/Los_Angeles",
+ "America/Anchorage",
+ "America/Juneau",
+ "America/Sitka",
+ "America/Metlakatla",
+ "America/Yakutat",
+ "America/Nome",
+ "America/Adak",
+ "Pacific/Honolulu"
+ ],
+ "is_in_european_union": false,
+ "fips": "US",
+ "geonames_id": 6252001,
+ "hasc_id": "US",
+ "wikidata_id": "Q30"
+ },
+ "city": {
+ "fips": "644000",
+ "alpha2": null,
+ "geonames_id": 5368753,
+ "hasc_id": null,
+ "wikidata_id": "Q65",
+ "name": "Los Angeles",
+ "name_translated": "Los Angeles"
+ },
+ "region": {
+ "fips": "US06",
+ "alpha2": "US-CA",
+ "geonames_id": 5332921,
+ "hasc_id": "US.CA",
+ "wikidata_id": "Q99",
+ "name": "California",
+ "name_translated": "California"
+ }
},
- "region": {
- "fips": "US06",
- "alpha2": "US-CA",
- "geonames_id": 5332921,
- "hasc_id": "US.CA",
- "wikidata_id": "Q99",
- "name": "California",
- "name_translated": "California"
+ "tlds": [
+ ".us"
+ ],
+ "timezone": {
+ "id": "America/Los_Angeles",
+ "current_time": "2023-05-04T04:30:28-07:00",
+ "code": "PDT",
+ "is_daylight_saving": true,
+ "gmt_offset": -25200
+ },
+ "security": {
+ "is_anonymous": false,
+ "is_datacenter": false,
+ "is_vpn": false,
+ "is_bot": false,
+ "is_abuser": true,
+ "is_known_attacker": true,
+ "is_proxy": false,
+ "is_spam": false,
+ "is_tor": false,
+ "is_icloud_relay": false,
+ "threat_score": 100
+ },
+ "domains": {
+ "count": 10943,
+ "domains": [
+ "eliwise.academy",
+ "accountingprose.academy",
+ "pistola.academy",
+ "1and1-test-ntlds-fr.accountant",
+ "omnergy.africa"
+ ]
}
- },
- "tlds": [
- ".us"
- ],
- "timezone": {
- "id": "America/Los_Angeles",
- "current_time": "2023-05-04T04:30:28-07:00",
- "code": "PDT",
- "is_daylight_saving": true,
- "gmt_offset": -25200
- },
- "security": {
- "is_anonymous": false,
- "is_datacenter": false,
- "is_vpn": false,
- "is_bot": false,
- "is_abuser": true,
- "is_known_attacker": true,
- "is_proxy": false,
- "is_spam": false,
- "is_tor": false,
- "is_icloud_relay": false,
- "threat_score": 100
- },
- "domains": {
- "count": 10943,
- "domains": [
- "eliwise.academy",
- "accountingprose.academy",
- "pistola.academy",
- "1and1-test-ntlds-fr.accountant",
- "omnergy.africa"
- ]
}
- }
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -285,10 +285,10 @@ class IpbaseInfo(object):
def main():
module_args = dict(
- ip=dict(type='str', required=False, no_log=False),
- apikey=dict(type='str', required=False, no_log=True),
- hostname=dict(type='bool', required=False, no_log=False, default=False),
- language=dict(type='str', required=False, no_log=False, default='en'),
+ ip=dict(type='str', no_log=False),
+ apikey=dict(type='str', no_log=True),
+ hostname=dict(type='bool', no_log=False, default=False),
+ language=dict(type='str', no_log=False, default='en'),
)
module = AnsibleModule(
diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py
index ff17d7e543..b7cd2b7447 100644
--- a/plugins/modules/ipify_facts.py
+++ b/plugins/modules/ipify_facts.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ipify_facts
short_description: Retrieve the public IP of your internet gateway
description:
- If behind NAT and need to know the public IP of your internet gateway.
author:
-- René Moser (@resmo)
+ - René Moser (@resmo)
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.facts
@@ -25,7 +24,7 @@ options:
api_url:
description:
- URL of the ipify.org API service.
- - C(?format=json) will be appended per default.
+ - C(?format=json) is appended by default.
type: str
default: https://api.ipify.org/
timeout:
@@ -35,14 +34,14 @@ options:
default: 10
validate_certs:
description:
- - When set to V(false), SSL certificates will not be validated.
+ - When set to V(false), SSL certificates are not validated.
type: bool
default: true
notes:
- Visit https://www.ipify.org to get more information.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Gather IP facts from ipify.org
- name: Get my public IP
community.general.ipify_facts:
@@ -52,16 +51,15 @@ EXAMPLES = r'''
community.general.ipify_facts:
api_url: http://api.example.com/ipify
timeout: 20
-'''
+"""
-RETURN = r'''
----
+RETURN = r"""
ipify_public_ip:
description: Public IP of the internet gateway.
returned: success
type: str
sample: 1.2.3.4
-'''
+"""
import json
diff --git a/plugins/modules/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py
index f29b3cbf4c..5db21dc8f8 100644
--- a/plugins/modules/ipinfoio_facts.py
+++ b/plugins/modules/ipinfoio_facts.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ipinfoio_facts
short_description: Retrieve IP geolocation facts of a host's IP address
description:
- - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+ - Gather IP geolocation facts of a host's IP address using ipinfo.io API.
author: "Aleksei Kostiuk (@akostyuk)"
extends_documentation_fragment:
- community.general.attributes
@@ -23,65 +22,65 @@ extends_documentation_fragment:
options:
timeout:
description:
- - HTTP connection timeout in seconds
+ - HTTP connection timeout in seconds.
required: false
default: 10
type: int
http_agent:
description:
- - Set http user agent
+ - Set http user agent.
required: false
default: "ansible-ipinfoio-module/0.0.1"
type: str
notes:
- - "Check http://ipinfo.io/ for more information"
-'''
+ - Check U(http://ipinfo.io/) for more information.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Retrieve geolocation data of a host's IP address
- name: Get IP geolocation data
community.general.ipinfoio_facts:
-'''
+"""
-RETURN = '''
+RETURN = r"""
ansible_facts:
- description: "Dictionary of ip geolocation facts for a host's IP address"
+ description: "Dictionary of IP geolocation facts for a host's IP address."
returned: changed
type: complex
contains:
ip:
- description: "Public IP address of a host"
+ description: "Public IP address of a host."
type: str
sample: "8.8.8.8"
hostname:
- description: Domain name
+ description: Domain name.
type: str
sample: "google-public-dns-a.google.com"
country:
- description: ISO 3166-1 alpha-2 country code
+ description: ISO 3166-1 alpha-2 country code.
type: str
sample: "US"
region:
- description: State or province name
+ description: State or province name.
type: str
sample: "California"
city:
- description: City name
+ description: City name.
type: str
sample: "Mountain View"
loc:
- description: Latitude and Longitude of the location
+ description: Latitude and Longitude of the location.
type: str
sample: "37.3860,-122.0838"
org:
- description: "organization's name"
+ description: "Organization's name."
type: str
sample: "AS3356 Level 3 Communications, Inc."
postal:
- description: Postal code
+ description: Postal code.
type: str
sample: "94035"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py
index 9f0016560e..69131732c6 100644
--- a/plugins/modules/ipmi_boot.py
+++ b/plugins/modules/ipmi_boot.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ipmi_boot
short_description: Management of order of boot devices
description:
- - Use this module to manage order of boot devices
+ - Use this module to manage order of boot devices.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -25,7 +24,7 @@ attributes:
options:
name:
description:
- - Hostname or ip address of the BMC.
+ - Hostname or IP address of the BMC.
required: true
type: str
port:
@@ -51,15 +50,15 @@ options:
version_added: 4.1.0
bootdev:
description:
- - Set boot device to use on next reboot
- - "The choices for the device are:
- - network -- Request network boot
- - floppy -- Boot from floppy
- - hd -- Boot from hard drive
- - safe -- Boot from hard drive, requesting 'safe mode'
- - optical -- boot from CD/DVD/BD drive
- - setup -- Boot into setup utility
- - default -- remove any IPMI directed boot device request"
+ - Set boot device to use on next reboot.
+ - 'The choices for the device are:'
+ - V(network) -- Request network boot.
+ - V(floppy) -- Boot from floppy.
+ - V(hd) -- Boot from hard drive.
+ - V(safe) -- Boot from hard drive, requesting 'safe mode'.
+ - V(optical) -- boot from CD/DVD/BD drive.
+ - V(setup) -- Boot into setup utility.
+ - V(default) -- remove any IPMI directed boot device request.
required: true
choices:
- network
@@ -73,49 +72,46 @@ options:
state:
description:
- Whether to ensure that boot devices is desired.
- - "The choices for the state are:
- - present -- Request system turn on
- - absent -- Request system turn on"
+ - 'The choices for the state are: - present -- Request system turn on - absent -- Request system turn on.'
default: present
- choices: [ present, absent ]
+ choices: [present, absent]
type: str
persistent:
description:
- - If set, ask that system firmware uses this device beyond next boot.
- Be aware many systems do not honor this.
+ - If set, ask that system firmware uses this device beyond next boot. Be aware many systems do not honor this.
type: bool
default: false
uefiboot:
description:
- - If set, request UEFI boot explicitly.
- Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
- In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ - If set, request UEFI boot explicitly. Strictly speaking, the spec suggests that if not set, the system should BIOS
+ boot and offers no "do not care" option. In practice, this flag not being set does not preclude UEFI boot on any system
+ I have encountered.
type: bool
default: false
requirements:
- pyghmi
author: "Bulat Gaifullin (@bgaifullin) "
-'''
+"""
-RETURN = '''
+RETURN = r"""
bootdev:
- description: The boot device name which will be used beyond next boot.
- returned: success
- type: str
- sample: default
+ description: The boot device name which is used beyond next boot.
+ returned: success
+ type: str
+ sample: default
persistent:
- description: If True, system firmware will use this device beyond next boot.
- returned: success
- type: bool
- sample: false
+ description: If V(true), system firmware uses this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
uefimode:
- description: If True, system firmware will use UEFI boot explicitly beyond next boot.
- returned: success
- type: bool
- sample: false
-'''
+ description: If V(true), system firmware uses UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure bootdevice is HD
community.general.ipmi_boot:
name: test.testdomain.com
@@ -131,7 +127,7 @@ EXAMPLES = '''
key: 1234567890AABBCCDEFF000000EEEE12
bootdev: network
state: absent
-'''
+"""
import traceback
import binascii
diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py
index 587cee06f3..292ecc73aa 100644
--- a/plugins/modules/ipmi_power.py
+++ b/plugins/modules/ipmi_power.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ipmi_power
short_description: Power management for machine
description:
- - Use this module for power management
+ - Use this module for power management.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -25,7 +24,7 @@ attributes:
options:
name:
description:
- - Hostname or ip address of the BMC.
+ - Hostname or IP address of the BMC.
required: true
type: str
port:
@@ -52,12 +51,12 @@ options:
state:
description:
- Whether to ensure that the machine in desired state.
- - "The choices for state are:
- - on -- Request system turn on
- - off -- Request system turn off without waiting for OS to shutdown
- - shutdown -- Have system request OS proper shutdown
- - reset -- Request system reset without waiting for OS
- - boot -- If system is off, then 'on', else 'reset'"
+ - 'The choices for state are:'
+ - V(on) -- Request system turn on.
+ - V(off) -- Request system turn off without waiting for OS to shutdown.
+ - V(shutdown) -- Have system request OS proper shutdown.
+ - V(reset) -- Request system reset without waiting for OS.
+ - V(boot) -- If system is off, then V(on), else V(reset).
- Either this option or O(machine) is required.
choices: ['on', 'off', shutdown, reset, boot]
type: str
@@ -68,8 +67,7 @@ options:
type: int
machine:
description:
- - Provide a list of the remote target address for the bridge IPMI request,
- and the power status.
+ - Provide a list of the remote target address for the bridge IPMI request, and the power status.
- Either this option or O(state) is required.
required: false
type: list
@@ -92,40 +90,41 @@ options:
requirements:
- pyghmi
author: "Bulat Gaifullin (@bgaifullin) "
-'''
+"""
-RETURN = '''
+RETURN = r"""
powerstate:
- description: The current power state of the machine.
- returned: success and O(machine) is not provided
- type: str
- sample: 'on'
+ description: The current power state of the machine.
+ returned: success and O(machine) is not provided
+ type: str
+ sample: 'on'
status:
- description: The current power state of the machine when the machine option is set.
- returned: success and O(machine) is provided
- type: list
- elements: dict
- version_added: 4.3.0
- contains:
- powerstate:
- description: The current power state of the machine specified by RV(status[].targetAddress).
- type: str
- targetAddress:
- description: The remote target address.
- type: int
- sample: [
- {
- "powerstate": "on",
- "targetAddress": 48,
- },
- {
- "powerstate": "on",
- "targetAddress": 50,
- },
+ description: The current power state of the machine when the machine option is set.
+ returned: success and O(machine) is provided
+ type: list
+ elements: dict
+ version_added: 4.3.0
+ contains:
+ powerstate:
+ description: The current power state of the machine specified by RV(status[].targetAddress).
+ type: str
+ targetAddress:
+ description: The remote target address.
+ type: int
+ sample:
+ [
+ {
+ "powerstate": "on",
+ "targetAddress": 48
+ },
+ {
+ "powerstate": "on",
+ "targetAddress": 50
+ }
]
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure machine is powered on
community.general.ipmi_power:
name: test.testdomain.com
@@ -153,7 +152,7 @@ EXAMPLES = '''
state: 'on'
- targetAddress: 50
state: 'off'
-'''
+"""
import traceback
import binascii
diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py
index c97b5694c9..21fe75ce02 100644
--- a/plugins/modules/iptables_state.py
+++ b/plugins/modules/iptables_state.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: iptables_state
short_description: Save iptables state into a file or restore it from a file
version_added: '1.1.0'
@@ -19,26 +18,17 @@ extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.flow
description:
- - C(iptables) is used to set up, maintain, and inspect the tables of IP
- packet filter rules in the Linux kernel.
- - This module handles the saving and/or loading of rules. This is the same
- as the behaviour of the C(iptables-save) and C(iptables-restore) (or
- C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
- module uses internally.
- - Modifying the state of the firewall remotely may lead to loose access to
- the host in case of mistake in new ruleset. This module embeds a rollback
- feature to avoid this, by telling the host to restore previous rules if a
- cookie is still there after a given delay, and all this time telling the
- controller to try to remove this cookie on the host through a new
- connection.
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same as the behaviour of the C(iptables-save) and
+ C(iptables-restore) (or C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this module uses internally.
+ - Modifying the state of the firewall remotely may lead to losing access to the host in case of a mistake in the new ruleset. This
+ module embeds a rollback feature to avoid this, by telling the host to restore previous rules if a cookie is still there
+ after a given delay, and all this time telling the controller to try to remove this cookie on the host through a new connection.
notes:
- - The rollback feature is not a module option and depends on task's
- attributes. To enable it, the module must be played asynchronously, i.e.
- by setting task attributes C(poll) to V(0), and C(async) to a value less
- or equal to C(ANSIBLE_TIMEOUT). If C(async) is greater, the rollback will
- still happen if it shall happen, but you will experience a connection
- timeout instead of more relevant info returned by the module after its
- failure.
+ - The rollback feature is not a module option and depends on task's attributes. To enable it, the module must be played
+ asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less or equal to C(ANSIBLE_TIMEOUT).
+ If C(async) is greater, the rollback still happens when needed, but you experience a connection timeout instead of more
+ relevant info returned by the module after its failure.
attributes:
check_mode:
support: full
@@ -59,22 +49,18 @@ options:
description:
- Which version of the IP protocol this module should apply to.
type: str
- choices: [ ipv4, ipv6 ]
+ choices: [ipv4, ipv6]
default: ipv4
modprobe:
description:
- - Specify the path to the C(modprobe) program internally used by iptables
- related commands to load kernel modules.
- - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the
- executable's path.
+ - Specify the path to the C(modprobe) program internally used by iptables related commands to load kernel modules.
+ - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the executable's path.
type: path
noflush:
description:
- For O(state=restored), ignored otherwise.
- - If V(false), restoring iptables rules from a file flushes (deletes)
- all previous contents of the respective table(s). If V(true), the
- previous rules are left untouched (but policies are updated anyway,
- for all built-in chains).
+ - If V(false), restoring iptables rules from a file flushes (deletes) all previous contents of the respective table(s).
+ If V(true), the previous rules are left untouched (but policies are updated anyway, for all built-in chains).
type: bool
default: false
path:
@@ -85,29 +71,26 @@ options:
required: true
state:
description:
- - Whether the firewall state should be saved (into a file) or restored
- (from a file).
+ - Whether the firewall state should be saved (into a file) or restored (from a file).
type: str
- choices: [ saved, restored ]
+ choices: [saved, restored]
required: true
table:
description:
- - When O(state=restored), restore only the named table even if the input
- file contains other tables. Fail if the named table is not declared in
- the file.
- - When O(state=saved), restrict output to the specified table. If not
- specified, output includes all active tables.
+ - When O(state=restored), restore only the named table even if the input file contains other tables. Fail if the named
+ table is not declared in the file.
+ - When O(state=saved), restrict output to the specified table. If not specified, output includes all active tables.
type: str
- choices: [ filter, nat, mangle, raw, security ]
+ choices: [filter, nat, mangle, raw, security]
wait:
description:
- - Wait N seconds for the xtables lock to prevent instant failure in case
- multiple instances of the program are running concurrently.
+ - Wait N seconds for the xtables lock to prevent instant failure in case multiple instances of the program are running
+ concurrently.
type: int
requirements: [iptables, ip6tables]
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# This will apply to all loaded/active IPv4 tables.
- name: Save current state of the firewall in system file
community.general.iptables_state:
@@ -151,9 +134,9 @@ EXAMPLES = r'''
- name: show current state of the firewall
ansible.builtin.debug:
var: iptables_state.initial_state
-'''
+"""
-RETURN = r'''
+RETURN = r"""
applied:
description: Whether or not the wanted state has been successfully restored.
type: bool
@@ -164,7 +147,8 @@ initial_state:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT ACCEPT [0:0]",
@@ -178,7 +162,8 @@ restored:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT DROP [0:0]",
@@ -197,7 +182,8 @@ saved:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT ACCEPT [0:0]",
@@ -235,7 +221,7 @@ tables:
]
}
returned: always
-'''
+"""
import re
diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py
index 3ffad79fb6..604eb82b5f 100644
--- a/plugins/modules/ipwcli_dns.py
+++ b/plugins/modules/ipwcli_dns.py
@@ -8,127 +8,124 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ipwcli_dns
-short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli)
version_added: '0.2.0'
description:
- - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
-
+ - Manage DNS records for the Ericsson IPWorks DNS server. The module uses the C(ipwcli) to deploy the DNS records.
requirements:
- - ipwcli (installed on Ericsson IPWorks)
+ - ipwcli (installed on Ericsson IPWorks)
notes:
- - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
-
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- dnsname:
- description:
- - Name of the record.
- required: true
- type: str
- type:
- description:
- - Type of the record.
- required: true
- type: str
- choices: [ NAPTR, SRV, A, AAAA ]
- container:
- description:
- - Sets the container zone for the record.
- required: true
- type: str
- address:
- description:
- - The IP address for the A or AAAA record.
- - Required for O(type=A) or O(type=AAAA).
- type: str
- ttl:
- description:
- - Sets the TTL of the record.
- type: int
- default: 3600
- state:
- description:
- - Whether the record should exist or not.
- type: str
- choices: [ absent, present ]
- default: present
- priority:
- description:
- - Sets the priority of the SRV record.
- type: int
- default: 10
- weight:
- description:
- - Sets the weight of the SRV record.
- type: int
- default: 10
- port:
- description:
- - Sets the port of the SRV record.
- - Required for O(type=SRV).
- type: int
- target:
- description:
- - Sets the target of the SRV record.
- - Required for O(type=SRV).
- type: str
- order:
- description:
- - Sets the order of the NAPTR record.
- - Required for O(type=NAPTR).
- type: int
- preference:
- description:
- - Sets the preference of the NAPTR record.
- - Required for O(type=NAPTR).
- type: int
- flags:
- description:
- - Sets one of the possible flags of NAPTR record.
- - Required for O(type=NAPTR).
- type: str
- choices: ['S', 'A', 'U', 'P']
- service:
- description:
- - Sets the service of the NAPTR record.
- - Required for O(type=NAPTR).
- type: str
- replacement:
- description:
- - Sets the replacement of the NAPTR record.
- - Required for O(type=NAPTR).
- type: str
- username:
- description:
- - Username to login on ipwcli.
- type: str
- required: true
- password:
- description:
- - Password to login on ipwcli.
- type: str
- required: true
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [NAPTR, SRV, A, AAAA]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+ - Required for O(type=A) or O(type=AAAA).
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [absent, present]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+ - Required for O(type=SRV).
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+ - Required for O(type=SRV).
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+ - Required for O(type=NAPTR).
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+ - Required for O(type=NAPTR).
+ type: int
+ flags:
+ description:
+ - Sets one of the possible flags of NAPTR record.
+ - Required for O(type=NAPTR).
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+ - Required for O(type=NAPTR).
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+ - Required for O(type=NAPTR).
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
author:
- - Christian Wollinger (@cwollinger)
-'''
+ - Christian Wollinger (@cwollinger)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create A record
community.general.ipwcli_dns:
dnsname: example.com
@@ -157,14 +154,14 @@ EXAMPLES = '''
service: 'SIP+D2T'
replacement: '_sip._tcp.test.example.com.'
flags: S
-'''
+"""
-RETURN = '''
+RETURN = r"""
record:
- description: The created record from the input params
- type: str
- returned: always
-'''
+ description: The created record from the input params.
+ type: str
+ returned: always
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -274,18 +271,18 @@ def run_module():
dnsname=dict(type='str', required=True),
type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
container=dict(type='str', required=True),
- address=dict(type='str', required=False),
- ttl=dict(type='int', required=False, default=3600),
+ address=dict(type='str'),
+ ttl=dict(type='int', default=3600),
state=dict(type='str', default='present', choices=['absent', 'present']),
- priority=dict(type='int', required=False, default=10),
- weight=dict(type='int', required=False, default=10),
- port=dict(type='int', required=False),
- target=dict(type='str', required=False),
- order=dict(type='int', required=False),
- preference=dict(type='int', required=False),
- flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
- service=dict(type='str', required=False),
- replacement=dict(type='str', required=False),
+ priority=dict(type='int', default=10),
+ weight=dict(type='int', default=10),
+ port=dict(type='int'),
+ target=dict(type='str'),
+ order=dict(type='int'),
+ preference=dict(type='int'),
+ flags=dict(type='str', choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str'),
+ replacement=dict(type='str'),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True)
)
diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py
index e40ba2d0ba..d18c9fd85f 100644
--- a/plugins/modules/irc.py
+++ b/plugins/modules/irc.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: irc
short_description: Send a message to an IRC channel or a nick
description:
@@ -26,12 +25,12 @@ options:
server:
type: str
description:
- - IRC server name/address
+ - IRC server name/address.
default: localhost
port:
type: int
description:
- - IRC server port number
+ - IRC server port number.
default: 6667
nick:
type: str
@@ -46,90 +45,102 @@ options:
topic:
type: str
description:
- - Set the channel topic
+ - Set the channel topic.
color:
type: str
description:
- Text color for the message.
default: "none"
- choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
- "light_blue", "pink", "gray", "light_gray"]
+ choices:
+ - none
+ - white
+ - black
+ - blue
+ - green
+ - red
+ - brown
+ - purple
+ - orange
+ - yellow
+ - light_green
+ - teal
+ - light_cyan
+ - light_blue
+ - pink
+ - gray
+ - light_gray
aliases: [colour]
channel:
type: str
description:
- - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message is sent to both of them.
nick_to:
type: list
elements: str
description:
- - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the
+ message is sent to both of them.
key:
type: str
description:
- - Channel key
+ - Channel key.
passwd:
type: str
description:
- - Server password
+ - Server password.
timeout:
type: int
description:
- - Timeout to use while waiting for successful registration and join
- messages, this is to prevent an endless loop
+ - Timeout to use while waiting for successful registration and join messages, this is to prevent an endless loop.
default: 30
use_tls:
description:
- - Designates whether TLS/SSL should be used when connecting to the IRC server
- - O(use_tls) is available since community.general 8.1.0, before the option
- was exlusively called O(use_ssl). The latter is now an alias of O(use_tls).
- - B(Note:) for security reasons, you should always set O(use_tls=true) and
- O(validate_certs=true) whenever possible.
- - The option currently defaults to V(false). The default has been B(deprecated) and will
- change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly
- set this option to a value (preferably V(true)).
+ - Designates whether TLS/SSL should be used when connecting to the IRC server.
+ - O(use_tls) is available since community.general 8.1.0, before the option was exclusively called O(use_ssl). The latter
+ is now an alias of O(use_tls).
+ - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible.
+ - The default of this option changed to V(true) in community.general 10.0.0.
type: bool
+ default: true
aliases:
- use_ssl
part:
description:
- - Designates whether user should part from channel after sending message or not.
- Useful for when using a faux bot and not wanting join/parts between messages.
+ - Designates whether user should part from channel after sending message or not. Useful for when using a mock bot and
+ not wanting join/parts between messages.
type: bool
default: true
style:
type: str
description:
- - Text style for the message. Note italic does not work on some clients
- choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ - Text style for the message. Note italic does not work on some clients.
+ choices: ["bold", "underline", "reverse", "italic", "none"]
default: none
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
- - This should always be set to V(true). Using V(false) is unsafe and should only be done
- if the network between between Ansible and the IRC server is known to be safe.
- - B(Note:) for security reasons, you should always set O(use_tls=true) and
- O(validate_certs=true) whenever possible.
- - The option currently defaults to V(false). The default has been B(deprecated) and will
- change to V(true) in community.general 10.0.0. To avoid deprecation warnings, explicitly
- set this option to a value (preferably V(true)).
+ - If set to V(false), the SSL certificates are not validated.
+ - This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between
+ Ansible and the IRC server is known to be safe.
+ - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible.
+ - The default of this option changed to V(true) in community.general 10.0.0.
type: bool
+ default: true
version_added: 8.1.0
# informational: requirements for nodes
-requirements: [ socket ]
+requirements: [socket]
author:
- - "Jan-Piet Mens (@jpmens)"
- - "Matt Martz (@sivel)"
-'''
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send a message to an IRC channel from nick ansible
community.general.irc:
server: irc.example.net
use_tls: true
validate_certs: true
- channel: #t1
+ channel: '#t1'
msg: Hello world
- name: Send a message to an IRC channel
@@ -139,7 +150,7 @@ EXAMPLES = '''
server: irc.example.net
use_tls: true
validate_certs: true
- channel: #t1
+ channel: '#t1'
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
@@ -151,14 +162,14 @@ EXAMPLES = '''
server: irc.example.net
use_tls: true
validate_certs: true
- channel: #t1
+ channel: '#t1'
nick_to:
- nick1
- nick2
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
-'''
+"""
# ===========================================
# IRC module support methods.
@@ -221,9 +232,11 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_tls:
+ kwargs = {}
if validate_certs:
try:
context = ssl.create_default_context()
+ kwargs["server_hostname"] = server
except AttributeError:
raise Exception('Need at least Python 2.7.9 for SSL certificate validation')
else:
@@ -233,7 +246,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
else:
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
- irc = context.wrap_socket(irc)
+ irc = context.wrap_socket(irc, **kwargs)
irc.connect((server, int(port)))
if passwd:
@@ -298,7 +311,7 @@ def main():
server=dict(default='localhost'),
port=dict(type='int', default=6667),
nick=dict(default='ansible'),
- nick_to=dict(required=False, type='list', elements='str'),
+ nick_to=dict(type='list', elements='str'),
msg=dict(required=True),
color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
"green", "red", "brown",
@@ -307,14 +320,14 @@ def main():
"light_blue", "pink", "gray",
"light_gray", "none"]),
style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
- channel=dict(required=False),
+ channel=dict(),
key=dict(no_log=True),
topic=dict(),
passwd=dict(no_log=True),
timeout=dict(type='int', default=30),
part=dict(type='bool', default=True),
- use_tls=dict(type='bool', aliases=['use_ssl']),
- validate_certs=dict(type='bool'),
+ use_tls=dict(type='bool', default=True, aliases=['use_ssl']),
+ validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
required_one_of=[['channel', 'nick_to']]
@@ -338,25 +351,6 @@ def main():
style = module.params["style"]
validate_certs = module.params["validate_certs"]
- if use_tls is None:
- module.deprecate(
- 'The default of use_tls will change to true in community.general 10.0.0.'
- ' Set a value now (preferably true, if possible) to avoid the deprecation warning.',
- version='10.0.0',
- collection_name='community.general',
- )
- use_tls = False
-
- if validate_certs is None:
- if use_tls:
- module.deprecate(
- 'The default of validate_certs will change to true in community.general 10.0.0.'
- ' Set a value now (prefarably true, if possible) to avoid the deprecation warning.',
- version='10.0.0',
- collection_name='community.general',
- )
- validate_certs = False
-
try:
send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style)
except Exception as e:
diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py
index c39c710d53..70f76558e6 100644
--- a/plugins/modules/iso_create.py
+++ b/plugins/modules/iso_create.py
@@ -9,8 +9,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: iso_create
short_description: Generate ISO file with specified files or folders
description:
@@ -31,60 +30,60 @@ attributes:
support: none
options:
- src_files:
- description:
- - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
- - Will fail if specified file or folder in O(src_files) does not exist on local machine.
- - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
- underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
- names are limited to 255 characters.'
- type: list
- required: true
- elements: path
- dest_iso:
- description:
- - The absolute path with file name of the new generated ISO file on local machine.
- - Will create intermediate folders when they does not exist.
- type: path
- required: true
- interchange_level:
- description:
- - The ISO9660 interchange level to use, it dictates the rules on the names of files.
- - Levels and valid values V(1), V(2), V(3), V(4) are supported.
- - The default value is level V(1), which is the most conservative, level V(3) is recommended.
- - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension.
- type: int
- default: 1
- choices: [1, 2, 3, 4]
- vol_ident:
- description:
- - The volume identification string to use on the new generated ISO image.
- type: str
- rock_ridge:
- description:
- - Whether to make this ISO have the Rock Ridge extensions or not.
- - Valid values are V(1.09), V(1.10) or V(1.12), means adding the specified Rock Ridge version to the ISO.
- - If unsure, set V(1.09) to ensure maximum compatibility.
- - If not specified, then not add Rock Ridge extension to the ISO.
- type: str
- choices: ['1.09', '1.10', '1.12']
- joliet:
- description:
- - Support levels and valid values are V(1), V(2), or V(3).
- - Level V(3) is by far the most common.
- - If not specified, then no Joliet support is added.
- type: int
- choices: [1, 2, 3]
- udf:
- description:
- - Whether to add UDF support to this ISO.
- - If set to V(true), then version 2.60 of the UDF spec is used.
- - If not specified or set to V(false), then no UDF support is added.
- type: bool
- default: false
-'''
+ src_files:
+ description:
+ - This is a list of absolute paths of source files or folders to be contained in the new generated ISO file.
+ - The module fails if specified file or folder in O(src_files) does not exist on local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores
+ (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited
+ to 255 characters.'
+ type: list
+ required: true
+ elements: path
+ dest_iso:
+ description:
+ - The absolute path with file name of the new generated ISO file on local machine.
+ - It creates intermediate folders when they do not exist.
+ type: path
+ required: true
+ interchange_level:
+ description:
+ - The ISO9660 interchange level to use, it dictates the rules on the names of files.
+ - Levels and valid values V(1), V(2), V(3), V(4) are supported.
+ - The default value is level V(1), which is the most conservative, level V(3) is recommended.
+ - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+ - The volume identification string to use on the new generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+ - Whether to make this ISO have the Rock Ridge extensions or not.
+ - Valid values are V(1.09), V(1.10) or V(1.12), means adding the specified Rock Ridge version to the ISO.
+ - If unsure, set V(1.09) to ensure maximum compatibility.
+ - If not specified, then not add Rock Ridge extension to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are V(1), V(2), or V(3).
+ - Level V(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+ - If set to V(true), then version 2.60 of the UDF spec is used.
+ - If not specified or set to V(false), then no UDF support is added.
+ type: bool
+ default: false
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create an ISO file
community.general.iso_create:
src_files:
@@ -109,46 +108,46 @@ EXAMPLES = r'''
interchange_level: 3
joliet: 3
vol_ident: WIN_AUTOINSTALL
-'''
+"""
-RETURN = r'''
+RETURN = r"""
source_file:
- description: Configured source files or directories list.
- returned: on success
- type: list
- elements: path
- sample: ["/path/to/file.txt", "/path/to/folder"]
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
created_iso:
- description: Created iso file path.
- returned: on success
- type: str
- sample: "/path/to/test.iso"
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
interchange_level:
- description: Configured interchange level.
- returned: on success
- type: int
- sample: 3
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
vol_ident:
- description: Configured volume identification string.
- returned: on success
- type: str
- sample: "OEMDRV"
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
joliet:
- description: Configured Joliet support level.
- returned: on success
- type: int
- sample: 3
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
rock_ridge:
- description: Configured Rock Ridge version.
- returned: on success
- type: str
- sample: "1.09"
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
udf:
- description: Configured UDF support.
- returned: on success
- type: bool
- sample: false
-'''
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: false
+"""
import os
import traceback
diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py
index 543faaa5ef..5ee5b22c2c 100644
--- a/plugins/modules/iso_customize.py
+++ b/plugins/modules/iso_customize.py
@@ -9,13 +9,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: iso_customize
short_description: Add/remove/change files in ISO file
description:
- This module is used to add/remove/change files in ISO file.
- - The file inside ISO will be overwritten if it exists by option O(add_files).
+ - The file inside ISO is overwritten if it exists by option O(add_files).
author:
- Yuhua Zou (@ZouYuhua)
requirements:
@@ -34,25 +33,25 @@ attributes:
options:
src_iso:
description:
- - This is the path of source ISO file.
+ - This is the path of source ISO file.
type: path
required: true
dest_iso:
description:
- - The path of the customized ISO file.
+ - The path of the customized ISO file.
type: path
required: true
delete_files:
description:
- - Absolute paths for files inside the ISO file that should be removed.
+ - Absolute paths for files inside the ISO file that should be removed.
type: list
required: false
elements: str
default: []
add_files:
description:
- - Allows to add and replace files in the ISO file.
- - Will create intermediate folders inside the ISO file when they do not exist.
+ - Allows to add and replace files in the ISO file.
+ - It creates intermediate folders inside the ISO file when they do not exist.
type: list
required: false
elements: dict
@@ -60,23 +59,22 @@ options:
suboptions:
src_file:
description:
- - The path with file name on the machine the module is executed on.
+ - The path with file name on the machine the module is executed on.
type: path
required: true
dest_file:
description:
- - The absolute path of the file inside the ISO file.
+ - The absolute path of the file inside the ISO file.
type: str
required: true
notes:
-- The C(pycdlib) library states it supports Python 2.7 and 3.4+.
-- >
- The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet / UDF.
- But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10.
- So we take workaround "delete the existing file and then add file for ISO with Rock Ridge".
-'''
+ - The C(pycdlib) library states it supports Python 2.7 and 3.4+.
+ - The function C(add_file) in pycdlib is designed to overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12
+ / Joliet / UDF. But it does not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround
+ "delete the existing file and then add file for ISO with Rock Ridge".
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: "Customize ISO file"
community.general.iso_customize:
src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso"
@@ -89,9 +87,9 @@ EXAMPLES = r'''
- src_file: "/path/to/ubuntu.seed"
dest_file: "/preseed/ubuntu.seed"
register: customize_iso_result
-'''
+"""
-RETURN = r'''
+RETURN = r"""
src_iso:
description: Path of source ISO file.
returned: on success
@@ -102,7 +100,7 @@ dest_iso:
returned: on success
type: str
sample: "/path/to/customized.iso"
-'''
+"""
import os
diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py
index 087ef2843f..88644a6eb6 100644
--- a/plugins/modules/iso_extract.py
+++ b/plugins/modules/iso_extract.py
@@ -11,8 +11,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- Jeroen Hoekx (@jhoekx)
- Matt Robinson (@ribbons)
@@ -21,12 +20,10 @@ module: iso_extract
short_description: Extract files from an ISO image
description:
- This module has two possible ways of operation.
- - If 7zip is installed on the system, this module extracts files from an ISO
- into a temporary directory and copies files to a given destination,
- if needed.
- - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
- mounts the ISO image to a temporary location, and copies files to a given
- destination, if needed.
+ - If 7zip is installed on the system, this module extracts files from an ISO into a temporary directory and copies files
+ to a given destination, if needed.
+ - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module mounts the ISO image to a temporary location,
+ and copies files to a given destination, if needed.
requirements:
- Either 7z (from C(7zip) or C(p7zip) package)
- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
@@ -40,51 +37,59 @@ attributes:
options:
image:
description:
- - The ISO image to extract files from.
+ - The ISO image to extract files from.
type: path
required: true
- aliases: [ path, src ]
+ aliases: [path, src]
dest:
description:
- - The destination directory to extract files to.
+ - The destination directory to extract files to.
type: path
required: true
files:
description:
- - A list of files to extract from the image.
- - Extracting directories does not work.
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
type: list
elements: str
required: true
force:
description:
- - If V(true), which will replace the remote file when contents are different than the source.
- - If V(false), the file will only be extracted and copied if the destination does not already exist.
+ - If V(true), it replaces the remote file when contents are different than the source.
+ - If V(false), the file is only extracted and copied if the destination does not already exist.
type: bool
default: true
executable:
description:
- - The path to the C(7z) executable to use for extracting files from the ISO.
- - If not provided, it will assume the value V(7z).
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ - If not provided, it assumes the value V(7z).
type: path
+ password:
+ description:
+ - Password used to decrypt files from the ISO.
+ - It is only used if C(7z) is used.
+ - The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords
+ to be revealed if someone else can list running processes on the same machine in the right moment.
+ type: str
+ version_added: 10.1.0
notes:
-- Only the file checksum (content) is taken into account when extracting files
- from the ISO image. If O(force=false), only checks the presence of the file.
-'''
+ - Only the file checksum (content) is taken into account when extracting files from the ISO image. If O(force=false), only
+ checks the presence of the file.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Extract kernel and ramdisk from a LiveCD
community.general.iso_extract:
image: /tmp/rear-test.iso
dest: /tmp/virt-rear/
files:
- - isolinux/kernel
- - isolinux/initrd.cgz
-'''
+ - isolinux/kernel
+ - isolinux/initrd.cgz
+"""
-RETURN = r'''
+RETURN = r"""
#
-'''
+"""
import os.path
import shutil
@@ -100,6 +105,7 @@ def main():
dest=dict(type='path', required=True),
files=dict(type='list', elements='str', required=True),
force=dict(type='bool', default=True),
+ password=dict(type='str', no_log=True),
executable=dict(type='path'), # No default on purpose
),
supports_check_mode=True,
@@ -108,6 +114,7 @@ def main():
dest = module.params['dest']
files = module.params['files']
force = module.params['force']
+ password = module.params['password']
executable = module.params['executable']
result = dict(
@@ -154,7 +161,10 @@ def main():
# Use 7zip when we have a binary, otherwise try to mount
if binary:
- cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files
+ cmd = [binary, 'x', image, '-o%s' % tmp_dir]
+ if password:
+ cmd += ["-p%s" % password]
+ cmd += extract_files
else:
cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir]
diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py
index 650b29957d..ab73672410 100644
--- a/plugins/modules/jabber.py
+++ b/plugins/modules/jabber.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jabber
short_description: Send a message to jabber user or chat room
description:
- - Send a message to jabber
+ - Send a message to jabber.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,17 +25,17 @@ options:
user:
type: str
description:
- - User as which to connect
+ - User as which to connect.
required: true
password:
type: str
description:
- - password for user to connect
+ - Password for user to connect.
required: true
to:
type: str
description:
- - user ID or name of the room, when using room use a slash to indicate your nick.
+ - User ID or name of the room, when using room use a slash to indicate your nick.
required: true
msg:
type: str
@@ -46,24 +45,22 @@ options:
host:
type: str
description:
- - host to connect, overrides user info
+ - Host to connect, overrides user info.
port:
type: int
description:
- - port to connect to, overrides default
+ - Port to connect to, overrides default.
default: 5222
encoding:
type: str
description:
- - message encoding
-
-# informational: requirements for nodes
+ - Message encoding.
requirements:
- - python xmpp (xmpppy)
+ - python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send a message to a user
community.general.jabber:
user: mybot@example.net
@@ -86,7 +83,7 @@ EXAMPLES = '''
password: secret
to: mychaps@example.net
msg: Ansible task finished
-'''
+"""
import time
import traceback
@@ -111,9 +108,9 @@ def main():
password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
- host=dict(required=False),
- port=dict(required=False, default=5222, type='int'),
- encoding=dict(required=False),
+ host=dict(),
+ port=dict(default=5222, type='int'),
+ encoding=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py
index e2d04b71e2..13cfea9324 100644
--- a/plugins/modules/java_cert.py
+++ b/plugins/modules/java_cert.py
@@ -8,14 +8,13 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: java_cert
short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts)
description:
- - This is a wrapper module around keytool, which can be used to import certificates
- and optionally private keys to a given java keystore, or remove them from it.
+ - This is a wrapper module around keytool, which can be used to import certificates and optionally private keys to a given
+ java keystore, or remove them from it.
extends_documentation_fragment:
- community.general.attributes
- ansible.builtin.files
@@ -33,7 +32,7 @@ options:
cert_port:
description:
- Port to connect to URL.
- - This will be used to create server URL:PORT.
+ - This is used to create server URL:PORT.
type: int
default: 443
cert_path:
@@ -61,9 +60,8 @@ options:
pkcs12_path:
description:
- Local path to load PKCS12 keystore from.
- - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching
- the certificate, and is used to import both the certificate and its private key into the
- java keystore.
+ - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching the certificate,
+ and is used to import both the certificate and its private key into the java keystore.
- Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate.
type: path
pkcs12_password:
@@ -100,10 +98,10 @@ options:
state:
description:
- Defines action which can be either certificate import or removal.
- - When state is present, the certificate will always idempotently be inserted
- into the keystore, even if there already exists a cert alias that is different.
+ - When O(state=present), the certificate is always inserted into the keystore, even if there already exists a cert alias
+ that is different.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
mode:
version_added: 8.5.0
@@ -125,10 +123,10 @@ options:
version_added: 8.5.0
requirements: [openssl, keytool]
author:
-- Adam Hamsik (@haad)
-'''
+ - Adam Hamsik (@haad)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Import SSL certificate from google.com to a given cacerts keystore
community.general.java_cert:
cert_url: google.com
@@ -196,27 +194,15 @@ EXAMPLES = r'''
keystore_pass: changeit
keystore_create: true
state: present
-'''
-
-RETURN = r'''
-msg:
- description: Output from stdout of keytool command after execution of given command.
- returned: success
- type: str
- sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
-
-rc:
- description: Keytool command execution return value.
- returned: success
- type: int
- sample: "0"
+"""
+RETURN = r"""
cmd:
description: Executed command to get action done.
returned: success
type: str
sample: "keytool -importcert -noprompt -keystore"
-'''
+"""
import os
import tempfile
@@ -280,7 +266,7 @@ def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_cert
(extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False)
if extract_rc != 0:
- # this time it's a real failure
+ # this time it is a real failure
module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr,
rc=extract_rc, cmd=extract_cmd)
@@ -317,12 +303,13 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo
"-noprompt",
"-keystore",
pkcs_file,
- "-alias",
- alias,
"-storetype",
"pkcs12",
"-rfc"
]
+ # Append optional alias
+ if alias:
+ export_cmd.extend(["-alias", alias])
(export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False)
if export_rc != 0:
@@ -395,6 +382,10 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia
keystore_path, keystore_pass, keystore_alias, keystore_type):
''' Import pkcs12 from path into keystore located on
keystore_path as alias '''
+ optional_aliases = {
+ "-destalias": keystore_alias,
+ "-srcalias": pkcs12_alias
+ }
import_cmd = [
executable,
"-importkeystore",
@@ -403,13 +394,14 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia
"pkcs12",
"-srckeystore",
pkcs12_path,
- "-srcalias",
- pkcs12_alias,
"-destkeystore",
keystore_path,
- "-destalias",
- keystore_alias
]
+ # Append optional aliases
+ for flag, value in optional_aliases.items():
+ if value:
+ import_cmd.extend([flag, value])
+
import_cmd += _get_keystore_type_keytool_parameters(keystore_type)
secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass)
diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py
index 0a8e3398d5..c826c9af4c 100644
--- a/plugins/modules/java_keystore.py
+++ b/plugins/modules/java_keystore.py
@@ -10,8 +10,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: java_keystore
short_description: Create a Java keystore in JKS format
description:
@@ -25,25 +24,22 @@ options:
name:
description:
- Name of the certificate in the keystore.
- - If the provided name does not exist in the keystore, the module
- will re-create the keystore. This behavior changed in community.general 3.0.0,
- before that the module would fail when the name did not match.
+ - If the provided name does not exist in the keystore, the module re-creates the keystore. This behavior changed in
+ community.general 3.0.0, before that the module would fail when the name did not match.
type: str
required: true
certificate:
description:
- Content of the certificate used to create the keystore.
- - If the fingerprint of the provided certificate does not match the
- fingerprint of the certificate bundled in the keystore, the keystore
- is regenerated with the provided certificate.
+ - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore,
+ the keystore is regenerated with the provided certificate.
- Exactly one of O(certificate) or O(certificate_path) is required.
type: str
certificate_path:
description:
- Location of the certificate used to create the keystore.
- - If the fingerprint of the provided certificate does not match the
- fingerprint of the certificate bundled in the keystore, the keystore
- is regenerated with the provided certificate.
+ - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore,
+ the keystore is regenerated with the provided certificate.
- Exactly one of O(certificate) or O(certificate_path) is required.
type: path
version_added: '3.0.0'
@@ -66,10 +62,8 @@ options:
password:
description:
- Password that should be used to secure the keystore.
- - If the provided password fails to unlock the keystore, the module
- will re-create the keystore with the new passphrase. This behavior
- changed in community.general 3.0.0, before that the module would fail
- when the password did not match.
+ - If the provided password fails to unlock the keystore, the module re-creates the keystore with the new passphrase.
+ This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match.
type: str
required: true
dest:
@@ -106,16 +100,13 @@ options:
keystore_type:
description:
- Type of the Java keystore.
- - When this option is omitted and the keystore doesn't already exist, the
- behavior follows C(keytool)'s default store type which depends on
- Java version; V(pkcs12) since Java 9 and V(jks) prior (may also
- be V(pkcs12) if new default has been backported to this version).
- - When this option is omitted and the keystore already exists, the current
- type is left untouched, unless another option leads to overwrite the
- keystore (in that case, this option behaves like for keystore creation).
- - When O(keystore_type) is set, the keystore is created with this type if
- it does not already exist, or is overwritten to match the given type in
- case of mismatch.
+ - When this option is omitted and the keystore does not already exist, the behavior follows C(keytool)'s default store
+ type which depends on Java version; V(pkcs12) since Java 9 and V(jks) prior (may also be V(pkcs12) if new default
+ has been backported to this version).
+ - When this option is omitted and the keystore already exists, the current type is left untouched, unless another option
+ leads to overwrite the keystore (in that case, this option behaves like for keystore creation).
+ - When O(keystore_type) is set, the keystore is created with this type if it does not already exist, or is overwritten
+ to match the given type in case of mismatch.
type: str
choices:
- jks
@@ -135,16 +126,14 @@ seealso:
- module: community.crypto.openssl_pkcs12
- module: community.general.java_cert
notes:
- - O(certificate) and O(private_key) require that their contents are available
- on the controller (either inline in a playbook, or with the P(ansible.builtin.file#lookup) lookup),
- while O(certificate_path) and O(private_key_path) require that the files are
- available on the target host.
- - By design, any change of a value of options O(keystore_type), O(name) or
- O(password), as well as changes of key or certificate materials will cause
- the existing O(dest) to be overwritten.
-'''
+ - O(certificate) and O(private_key) require that their contents are available on the controller (either inline in a playbook,
+ or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the
+ files are available on the target host.
+ - By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate
+ materials causes the existing O(dest) to be overwritten.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a keystore for the given certificate/private key pair (inline)
community.general.java_keystore:
name: example
@@ -174,33 +163,21 @@ EXAMPLES = '''
private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key
password: changeit
dest: /etc/security/keystore.jks
-'''
-
-RETURN = '''
-msg:
- description: Output from stdout of keytool/openssl command after execution of given command or an error.
- returned: changed and failure
- type: str
- sample: "Unable to find the current certificate fingerprint in ..."
+"""
+RETURN = r"""
err:
description: Output from stderr of keytool/openssl command after error of given command.
returned: failure
type: str
sample: "Keystore password is too short - must be at least 6 characters\n"
-rc:
- description: keytool/openssl command execution return value
- returned: changed and failure
- type: int
- sample: "0"
-
cmd:
- description: Executed command to get action done
+ description: Executed command to get action done.
returned: changed and failure
type: str
sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256"
-'''
+"""
import os
diff --git a/plugins/modules/jboss.py b/plugins/modules/jboss.py
index 3d07a38d63..2d4f4b9bad 100644
--- a/plugins/modules/jboss.py
+++ b/plugins/modules/jboss.py
@@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: jboss
short_description: Deploy applications to JBoss
description:
@@ -39,23 +39,23 @@ options:
- The location in the filesystem where the deployment scanner listens.
type: path
state:
- choices: [ present, absent ]
+ choices: [present, absent]
default: "present"
description:
- Whether the application should be deployed or undeployed.
type: str
notes:
- - The JBoss standalone deployment-scanner has to be enabled in standalone.xml
- - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner.
- Duration of waiting time depends on scan-interval parameter from standalone.xml.
- - Ensure no identically named application is deployed through the JBoss CLI
+ - The JBoss standalone deployment-scanner has to be enabled in C(standalone.xml).
+ - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner. Duration of waiting time depends
+ on scan-interval parameter from C(standalone.xml).
+ - Ensure no identically named application is deployed through the JBoss CLI.
seealso:
-- name: WildFly reference
- description: Complete reference of the WildFly documentation.
- link: https://docs.wildfly.org
+ - name: WildFly reference
+ description: Complete reference of the WildFly documentation.
+ link: https://docs.wildfly.org
author:
- Jeroen Hoekx (@jhoekx)
-'''
+"""
EXAMPLES = r"""
- name: Deploy a hello world application to the default deploy_path
diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py
index 6d830849e7..4e11dd3642 100644
--- a/plugins/modules/jenkins_build.py
+++ b/plugins/modules/jenkins_build.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jenkins_build
short_description: Manage jenkins builds
version_added: 2.2.0
description:
- - Manage Jenkins builds with Jenkins REST API.
+ - Manage Jenkins builds with Jenkins REST API.
requirements:
- "python-jenkins >= 0.4.12"
author:
@@ -64,7 +63,7 @@ options:
type: str
user:
description:
- - User to authenticate with the Jenkins server.
+ - User to authenticate with the Jenkins server.
type: str
detach:
description:
@@ -79,9 +78,9 @@ options:
default: 10
type: int
version_added: 7.4.0
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a jenkins build using basic authentication
community.general.jenkins_build:
name: "test-check"
@@ -100,6 +99,16 @@ EXAMPLES = '''
state: stopped
url: http://localhost:8080
+- name: Trigger Jenkins build in detached mode
+ community.general.jenkins_build:
+ name: "detached-build"
+ state: present
+ user: admin
+ token: abcdefghijklmnopqrstuvwxyz123456
+ url: http://localhost:8080
+ detach: true
+ time_between_checks: 20
+
- name: Delete a jenkins build using token authentication
community.general.jenkins_build:
name: "delete-experiment"
@@ -108,10 +117,9 @@ EXAMPLES = '''
user: Jenkins
token: abcdefghijklmnopqrstuvwxyz123456
url: http://localhost:8080
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
name:
description: Name of the jenkins job.
returned: success
@@ -128,7 +136,7 @@ user:
type: str
sample: admin
url:
- description: Url to connect to the Jenkins server.
+ description: URL to connect to the Jenkins server.
returned: success
type: str
sample: https://jenkins.mydomain.com
@@ -136,7 +144,7 @@ build_info:
description: Build info of the jenkins job.
returned: success
type: dict
-'''
+"""
import traceback
from time import sleep
@@ -182,11 +190,11 @@ class JenkinsBuild:
def get_jenkins_connection(self):
try:
- if (self.user and self.password):
+ if self.user and self.password:
return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
- elif (self.user and self.token):
+ elif self.user and self.token:
return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
- elif (self.user and not (self.password or self.token)):
+ elif self.user and not (self.password or self.token):
return jenkins.Jenkins(self.jenkins_url, self.user)
else:
return jenkins.Jenkins(self.jenkins_url)
diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py
index eae6eb9374..85cb22ad2c 100644
--- a/plugins/modules/jenkins_build_info.py
+++ b/plugins/modules/jenkins_build_info.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jenkins_build_info
short_description: Get information about Jenkins builds
version_added: 7.4.0
description:
- - Get information about Jenkins builds with Jenkins REST API.
+ - Get information about Jenkins builds with Jenkins REST API.
requirements:
- "python-jenkins >= 0.4.12"
author:
@@ -31,7 +30,7 @@ options:
build_number:
description:
- An integer which specifies a build of a job.
- - If not specified the last build information will be returned.
+ - If not specified the last build information is returned.
type: int
password:
description:
@@ -48,11 +47,11 @@ options:
type: str
user:
description:
- - User to authenticate with the Jenkins server.
+ - User to authenticate with the Jenkins server.
type: str
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get information about a jenkins build using basic authentication
community.general.jenkins_build_info:
name: "test-check"
@@ -74,10 +73,9 @@ EXAMPLES = '''
user: Jenkins
token: abcdefghijklmnopqrstuvwxyz123456
url: http://localhost:8080
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
name:
description: Name of the jenkins job.
returned: success
@@ -102,7 +100,7 @@ build_info:
description: Build info of the jenkins job.
returned: success
type: dict
-'''
+"""
import traceback
@@ -140,11 +138,11 @@ class JenkinsBuildInfo:
def get_jenkins_connection(self):
try:
- if (self.user and self.password):
+ if self.user and self.password:
return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
- elif (self.user and self.token):
+ elif self.user and self.token:
return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
- elif (self.user and not (self.password or self.token)):
+ elif self.user and not (self.password or self.token):
return jenkins.Jenkins(self.jenkins_url, self.user)
else:
return jenkins.Jenkins(self.jenkins_url)
diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py
new file mode 100644
index 0000000000..3bd8a9dd7a
--- /dev/null
+++ b/plugins/modules/jenkins_credential.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: jenkins_credential
+short_description: Manage Jenkins credentials and domains through API
+version_added: 11.1.0
+description:
+ - This module allows managing Jenkins credentials and domain scopes through the Jenkins HTTP API.
+ - Create, update, and delete different credential types such as C(username/password), C(secret text), C(SSH key), C(certificates),
+ C(GitHub App), and domains.
+ - For scoped domains (O(type=scope)), it supports restrictions based on V(hostname), V(hostname:port), V(path), and V(scheme).
+requirements:
+ - urllib3 >= 1.26.0
+author:
+ - Youssef Ali (@YoussefKhalidAli)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+ - The ID of the Jenkins credential or domain.
+ type: str
+ type:
+ description:
+ - Type of the credential or action.
+ choices:
+ - user_and_pass
+ - file
+ - text
+ - github_app
+ - ssh_key
+ - certificate
+ - scope
+ - token
+ type: str
+ state:
+ description:
+ - The state of the credential.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ scope:
+ description:
+ - Jenkins credential domain scope.
+ - Deleting a domain scope deletes all credentials within it.
+ type: str
+ default: '_'
+ force:
+ description:
+ - Force update if the credential already exists, used with O(state=present).
+ - If set to V(true), it deletes the existing credential before creating a new one.
+ - Always returns RV(ignore:changed=true).
+ type: bool
+ default: false
+ url:
+ description:
+ - Jenkins server URL.
+ type: str
+ default: http://localhost:8080
+ jenkins_user:
+ description:
+ - Jenkins user for authentication.
+ required: true
+ type: str
+ jenkins_password:
+ description:
+ - Jenkins password for token creation. Required if O(type=token).
+ type: str
+ token:
+ description:
+ - Jenkins API token. Required unless O(type=token).
+ type: str
+ description:
+ description:
+ - Description of the credential or domain.
+ default: ''
+ type: str
+ location:
+ description:
+ - Location of the credential. Either V(system) or V(folder).
+ - If O(location=folder) then O(url) must be set to V(/job/).
+ choices:
+ - system
+ - folder
+ default: 'system'
+ type: str
+ name:
+ description:
+ - Name of the token to generate. Required if O(type=token).
+ - When generating a new token, do not pass O(id). It is generated automatically.
+ - Creating two tokens with the same name generates two distinct tokens with different RV(token_uuid) values.
+ - Replacing a token with another one of the same name requires deleting the original first using O(force=true).
+ type: str
+ username:
+ description:
+ - Username for credentials types that require it (for example O(type=ssh_key) or O(type=user_and_pass)).
+ type: str
+ password:
+ description:
+ - Password for credentials types that require it (for example O(type=user_and_pass) or O(type=certificate)).
+ type: str
+ secret:
+ description:
+ - Secret text (used when O(type=text)).
+ type: str
+ appID:
+ description:
+ - GitHub App ID.
+ type: str
+ api_uri:
+ description:
+ - Link to GitHub API.
+ default: 'https://api.github.com'
+ type: str
+ owner:
+ description:
+ - GitHub App owner.
+ type: str
+ file_path:
+ description:
+ - File path to secret file (for example O(type=file) or O(type=certificate)).
+ - For O(type=certificate), this can be a V(.p12) or V(.pem) file.
+ type: path
+ private_key_path:
+ description:
+ - Path to private key file for PEM certificates or GitHub Apps.
+ type: path
+ passphrase:
+ description:
+ - SSH passphrase if needed.
+ type: str
+ inc_hostname:
+ description:
+ - List of hostnames to include in scope.
+ type: list
+ elements: str
+ exc_hostname:
+ description:
+ - List of hostnames to exclude from scope.
+ - If a hostname appears in both this list and O(inc_hostname), the hostname is excluded.
+ type: list
+ elements: str
+ inc_hostname_port:
+ description:
+ - List of V(host:port) to include in scope.
+ type: list
+ elements: str
+ exc_hostname_port:
+ description:
+ - List of V(host:port) to exclude from scope.
+ - If a hostname and port appears in both this list and O(inc_hostname_port), it is excluded.
+ type: list
+ elements: str
+ inc_path:
+ description:
+ - List of URL paths to include when matching credentials to domains.
+ - 'B(Matching is hierarchical): subpaths of excluded paths are also excluded, even if explicitly included.'
+ type: list
+ elements: str
+ exc_path:
+ description:
+ - List of URL paths to exclude.
+ - If a path appears in both this list and O(inc_path), it is excluded.
+ - If you exclude a subpath of a path previously included, that subpath alone is excluded.
+ type: list
+ elements: str
+ schemes:
+ description:
+ - List of schemes (for example V(http) or V(https)) to match.
+ type: list
+ elements: str
+"""
+
+EXAMPLES = r"""
+- name: Generate token
+ community.general.jenkins_credential:
+ id: "test-token"
+ jenkins_user: "admin"
+ jenkins_password: "password"
+ type: "token"
+ register: token_result
+
+- name: Add CUSTOM scope credential
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Custom scope credential"
+ inc_path:
+ - "include/path"
+ - "include/path2"
+ exc_path:
+ - "exclude/path"
+ - "exclude/path2"
+ inc_hostname:
+ - "included-hostname"
+ - "included-hostname2"
+ exc_hostname:
+ - "excluded-hostname"
+ - "excluded-hostname2"
+ schemes:
+ - "http"
+ - "https"
+ inc_hostname_port:
+ - "included-hostname:7000"
+ - "included-hostname2:7000"
+ exc_hostname_port:
+ - "excluded-hostname:7000"
+ - "excluded-hostname2:7000"
+
+- name: Add user_and_pass credential
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ type: "user_and_pass"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "User and password credential"
+ username: "user1"
+ password: "pass1"
+
+- name: Add file credential to custom scope
+ community.general.jenkins_credential:
+ id: "file-id"
+ type: "file"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ description: "File credential"
+ file_path: "../vars/my-secret.pem"
+
+- name: Add text credential to folder
+ community.general.jenkins_credential:
+ id: "text-id"
+ type: "text"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Text credential"
+ secret: "mysecrettext"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+
+- name: Add githubApp credential
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ type: "github_app"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "GitHub app credential"
+ appID: "12345"
+ file_path: "../vars/github.pem"
+ owner: "github_owner"
+
+- name: Add sshKey credential
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ type: "ssh_key"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "SSH key credential"
+ username: "sshuser"
+ file_path: "../vars/ssh_key"
+ passphrase: 1234
+
+- name: Add certificate credential (p12)
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ type: "certificate"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Certificate credential"
+ password: "12345678901234"
+ file_path: "../vars/certificate.p12"
+
+- name: Add certificate credential (pem)
+ community.general.jenkins_credential:
+ id: "certificate-id-pem"
+ type: "certificate"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Certificate credential (pem)"
+ file_path: "../vars/cert.pem"
+ private_key_path: "../vars/private.key"
+"""
+RETURN = r"""
+details:
+ description: Return more details in case of errors.
+ type: str
+ returned: failed
+token:
+ description:
+ - The generated API token if O(type=token).
+ - This is needed to authenticate API calls later.
+ - This should be stored securely, as it is the only time it is returned.
+ type: str
+ returned: success
+token_uuid:
+ description:
+ - The generated ID of the token.
+ - You pass this value back to the module as O(id) to edit or revoke the token later.
+ - This should be stored securely, as it is the only time it is returned.
+ type: str
+ returned: success
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.community.general.plugins.module_utils import deps
+
+import json
+import os
+import base64
+
+with deps.declare("urllib3", reason="urllib3 is required to embed files into requests"):
+ import urllib3
+
+
+# Function to validate file paths exist on disk
+def validate_file_exist(module, path):
+
+ if path and not os.path.exists(path):
+ module.fail_json(msg="File not found: {}".format(path))
+
+
+# Gets the Jenkins crumb for CSRF protection which is required for API calls
+def get_jenkins_crumb(module, headers):
+ type = module.params["type"]
+ url = module.params["url"]
+
+ if "/job" in url:
+ url = url.split("/job")[0]
+
+ crumb_url = "{}/crumbIssuer/api/json".format(url)
+
+ response, info = fetch_url(module, crumb_url, headers=headers)
+
+ if info["status"] != 200:
+ module.fail_json(msg="Failed to fetch Jenkins crumb. Confirm token is real.")
+
+ # Cookie is needed to generate API token
+ cookie = info.get("set-cookie", "")
+ session_cookie = cookie.split(";")[0] if cookie else None
+
+ try:
+ data = response.read()
+ json_data = json.loads(data)
+ crumb_request_field = json_data["crumbRequestField"]
+ crumb = json_data["crumb"]
+ headers[crumb_request_field] = crumb # Set the crumb in headers
+ headers["Content-Type"] = (
+ "application/x-www-form-urlencoded" # Set Content-Type for form data
+ )
+ if type == "token":
+ headers["Cookie"] = (
+ session_cookie # Set session cookie for token operations
+ )
+ return crumb_request_field, crumb, session_cookie # Return for test purposes
+
+ except Exception:
+ return None
+
+
+# Function to clean the data sent via API by removing unwanted keys and None values
+def clean_data(data):
+ # Keys to remove (including those with None values)
+ keys_to_remove = {
+ "url",
+ "token",
+ "jenkins_user",
+ "jenkins_password",
+ "file_path",
+ "private_key_path",
+ "type",
+ "state",
+ "force",
+ "name",
+ "scope",
+ "location",
+ "api_uri",
+ }
+
+ # Filter out None values and unwanted keys
+ cleaned_data = {
+ key: value
+ for key, value in data.items()
+ if value is not None and key not in keys_to_remove
+ }
+
+ return cleaned_data
+
+
+# Function to check if credentials/domain exists
+def target_exists(module, check_domain=False):
+ url = module.params["url"]
+ location = module.params["location"]
+ scope = module.params["scope"]
+ name = module.params["id"]
+ user = module.params["jenkins_user"]
+ token = module.params["token"]
+
+ headers = {"Authorization": basic_auth_header(user, token)}
+
+ if module.params["type"] == "scope" or check_domain:
+ target_url = "{}/credentials/store/{}/domain/{}/api/json".format(
+ url, location, scope if check_domain else name
+ )
+ elif module.params["type"] == "token":
+ return False # Can't check token
+ else:
+ target_url = "{}/credentials/store/{}/domain/{}/credential/{}/api/json".format(
+ url, location, scope, name
+ )
+
+ response, info = fetch_url(module, target_url, headers=headers)
+ status = info.get("status", 0)
+
+ if status == 200:
+ return True
+ elif status == 404:
+ return False
+ else:
+ module.fail_json(
+ msg="Unexpected status code {} when checking {} existence.".format(
+ status, name
+ )
+ )
+
+
+# Function to delete the scope or credential provided
+def delete_target(module, headers):
+ user = module.params["jenkins_user"]
+ type = module.params["type"]
+ url = module.params["url"]
+ location = module.params["location"]
+ id = module.params["id"]
+ scope = module.params["scope"]
+
+ body = False
+
+ try:
+
+ if type == "token":
+ delete_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/revoke".format(
+ url, user
+ )
+ body = urlencode({"tokenUuid": id})
+
+ elif type == "scope":
+ delete_url = "{}/credentials/store/{}/domain/{}/doDelete".format(
+ url, location, id
+ )
+
+ else:
+ delete_url = (
+ "{}/credentials/store/{}/domain/{}/credential/{}/doDelete".format(
+ url, location, scope, id
+ )
+ )
+
+ response, info = fetch_url(
+ module,
+ delete_url,
+ headers=headers,
+ data=body if body else None,
+ method="POST",
+ )
+
+ status = info.get("status", 0)
+ if not status == 200:
+ module.fail_json(
+ msg="Failed to delete: HTTP {}, {}, {}".format(
+ status, response, headers
+ )
+ )
+
+ except Exception as e:
+ module.fail_json(msg="Exception during delete: {}".format(str(e)))
+
+
+# Function to read the private key file for types github_app and ssh_key
+def read_privateKey(module):
+ try:
+ with open(module.params["private_key_path"], "r") as f:
+ private_key = f.read().strip()
+ return private_key
+ except Exception as e:
+ module.fail_json(msg="Failed to read private key file: {}".format(str(e)))
+
+
+# Function to build multipart form-data body and content-type header for file credential upload.
+# Returns:
+# body (bytes): Encoded multipart data
+# content_type (str): Content-Type header including boundary
+def embed_file_into_body(module, file_path, credentials):
+
+ filename = os.path.basename(file_path)
+
+ try:
+ with open(file_path, "rb") as f:
+ file_bytes = f.read()
+ except Exception as e:
+ module.fail_json(msg="Failed to read file: {}".format(str(e)))
+ return "", "" # Return for test purposes
+
+ credentials.update(
+ {
+ "file": "file0",
+ "fileName": filename,
+ }
+ )
+
+ payload = {"credentials": credentials}
+
+ fields = {"file0": (filename, file_bytes), "json": json.dumps(payload)}
+
+ body, content_type = urllib3.encode_multipart_formdata(fields)
+ return body, content_type
+
+
+# Main function to run the Ansible module
+def run_module():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type="str"),
+ type=dict(
+ type="str",
+ choices=[
+ "user_and_pass",
+ "file",
+ "text",
+ "github_app",
+ "ssh_key",
+ "certificate",
+ "scope",
+ "token",
+ ],
+ ),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ force=dict(type="bool", default=False),
+ scope=dict(type="str", default="_"),
+ url=dict(type="str", default="http://localhost:8080"),
+ jenkins_user=dict(type="str", required=True),
+ jenkins_password=dict(type="str", no_log=True),
+ token=dict(type="str", no_log=True),
+ description=dict(type="str", default=""),
+ location=dict(type="str", default="system", choices=["system", "folder"]),
+ name=dict(type="str"),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ file_path=dict(type="path"),
+ secret=dict(type="str", no_log=True),
+ appID=dict(type="str"),
+ api_uri=dict(type="str", default="https://api.github.com"),
+ owner=dict(type="str"),
+ passphrase=dict(type="str", no_log=True),
+ private_key_path=dict(type="path", no_log=True),
+ # Scope specifications parameters
+ inc_hostname=dict(type="list", elements="str"),
+ exc_hostname=dict(type="list", elements="str"),
+ inc_hostname_port=dict(type="list", elements="str"),
+ exc_hostname_port=dict(type="list", elements="str"),
+ inc_path=dict(type="list", elements="str"),
+ exc_path=dict(type="list", elements="str"),
+ schemes=dict(type="list", elements="str"),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "present", ["type"]),
+ ("state", "absent", ["id"]),
+ ("type", "token", ["name", "jenkins_password"]),
+ ("type", "user_and_pass", ["username", "password", "id", "token"]),
+ ("type", "file", ["file_path", "id", "token"]),
+ ("type", "text", ["secret", "id", "token"]),
+ ("type", "github_app", ["appID", "private_key_path", "id", "token"]),
+ ("type", "ssh_key", ["username", "private_key_path", "id", "token"]),
+ ("type", "certificate", ["file_path", "id", "token"]),
+ ("type", "scope", ["id", "token"]),
+ ],
+ )
+
+ # Parameters
+ id = module.params["id"]
+ type = module.params["type"]
+ state = module.params["state"]
+ force = module.params["force"]
+ scope = module.params["scope"]
+ url = module.params["url"]
+ jenkins_user = module.params["jenkins_user"]
+ jenkins_password = module.params["jenkins_password"]
+ name = module.params["name"]
+ token = module.params["token"]
+ description = module.params["description"]
+ location = module.params["location"]
+ filePath = module.params["file_path"]
+ private_key_path = module.params["private_key_path"]
+ api_uri = module.params["api_uri"]
+ inc_hostname = module.params["inc_hostname"]
+ exc_hostname = module.params["exc_hostname"]
+ inc_hostname_port = module.params["inc_hostname_port"]
+ exc_hostname_port = module.params["exc_hostname_port"]
+ inc_path = module.params["inc_path"]
+ exc_path = module.params["exc_path"]
+ schemes = module.params["schemes"]
+
+ deps.validate(module)
+
+ headers = {
+ "Authorization": basic_auth_header(jenkins_user, token or jenkins_password),
+ }
+
+ # Get the crumb for CSRF protection
+ get_jenkins_crumb(module, headers)
+
+ result = dict(
+ changed=False,
+ msg="",
+ )
+
+ credentials = clean_data(module.params)
+
+ does_exist = target_exists(module)
+
+ # Check if the credential/domain doesn't exist and the user wants to delete
+ if not does_exist and state == "absent" and not type == "token":
+ result["changed"] = False
+ result["msg"] = "{} does not exist.".format(id)
+ module.exit_json(**result)
+
+ if state == "present":
+
+ # If updating, we need to delete the existing credential/domain first based on force parameter
+ if force and (does_exist or type == "token"):
+ delete_target(module, headers)
+ elif does_exist and not force:
+ result["changed"] = False
+ result["msg"] = "{} already exists. Use force=True to update.".format(id)
+ module.exit_json(**result)
+
+ if type == "token":
+
+ post_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken".format(
+ url, jenkins_user
+ )
+
+ body = "newTokenName={}".format(name)
+
+ elif type == "scope":
+
+ post_url = "{}/credentials/store/{}/createDomain".format(url, location)
+
+ specifications = []
+
+ # Create a domain in Jenkins
+ if inc_hostname or exc_hostname:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnameSpecification",
+ "includes": ",".join(inc_hostname),
+ "excludes": ",".join(exc_hostname),
+ }
+ )
+
+ if inc_hostname_port or exc_hostname_port:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnamePortSpecification",
+ "includes": ",".join(inc_hostname_port),
+ "excludes": ",".join(exc_hostname_port),
+ }
+ )
+
+ if schemes:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.SchemeSpecification",
+ "schemes": ",".join(schemes),
+ },
+ )
+
+ if inc_path or exc_path:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.PathSpecification",
+ "includes": ",".join(inc_path),
+ "excludes": ",".join(exc_path),
+ }
+ )
+
+ payload = {
+ "name": id,
+ "description": description,
+ "specifications": specifications,
+ }
+
+ else:
+ if filePath:
+ validate_file_exist(module, filePath)
+ elif private_key_path:
+ validate_file_exist(module, private_key_path)
+
+ post_url = "{}/credentials/store/{}/domain/{}/createCredentials".format(
+ url, location, scope
+ )
+
+ cred_class = {
+ "user_and_pass": "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl",
+ "file": "org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl",
+ "text": "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl",
+ "github_app": "org.jenkinsci.plugins.github_branch_source.GitHubAppCredentials",
+ "ssh_key": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey",
+ "certificate": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl",
+ }
+ credentials.update({"$class": cred_class[type]})
+
+ if type == "file":
+
+ # Build multipart body and content-type
+ body, content_type = embed_file_into_body(module, filePath, credentials)
+ headers["Content-Type"] = content_type
+
+ elif type == "github_app":
+
+ private_key = read_privateKey(module)
+
+ credentials.update(
+ {
+ "privateKey": private_key,
+ "apiUri": api_uri,
+ }
+ )
+
+ elif type == "ssh_key":
+
+ private_key = read_privateKey(module)
+
+ credentials.update(
+ {
+ "privateKeySource": {
+ "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource",
+ "privateKey": private_key,
+ },
+ }
+ )
+
+ elif type == "certificate":
+
+ name, ext = os.path.splitext(filePath)
+
+ if ext.lower() in [".p12", ".pfx"]:
+ try:
+ with open(filePath, "rb") as f:
+ file_content = f.read()
+ uploaded_keystore = base64.b64encode(file_content).decode(
+ "utf-8"
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to read or encode keystore file: {}".format(
+ str(e)
+ )
+ )
+
+ credentials.update(
+ {
+ "keyStoreSource": {
+ "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$UploadedKeyStoreSource",
+ "uploadedKeystore": uploaded_keystore,
+ },
+ }
+ )
+
+ elif ext.lower() in [".pem", ".crt"]: # PEM mode
+ try:
+ with open(filePath, "r") as f:
+ cert_chain = f.read()
+ with open(private_key_path, "r") as f:
+ private_key = f.read()
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to read PEM files: {}".format(str(e))
+ )
+
+ credentials.update(
+ {
+ "keyStoreSource": {
+ "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$PEMEntryKeyStoreSource",
+ "certChain": cert_chain,
+ "privateKey": private_key,
+ },
+ }
+ )
+
+ else:
+ module.fail_json(
+ msg="Unsupported certificate file type. Only .p12, .pfx, .pem or .crt are supported."
+ )
+
+ payload = {"credentials": credentials}
+
+ if not type == "file" and not type == "token":
+ body = urlencode({"json": json.dumps(payload)})
+
+ else: # Delete
+
+ delete_target(module, headers)
+
+ module.exit_json(changed=True, msg="{} deleted successfully.".format(id))
+
+ if (
+ not type == "scope" and not scope == "_"
+ ): # Check if custom scope exists if adding to a custom scope
+ if not target_exists(module, True):
+ module.fail_json(msg="Domain {} doesn't exists".format(scope))
+
+ try:
+ response, info = fetch_url(
+ module, post_url, headers=headers, data=body, method="POST"
+ )
+ except Exception as e:
+ module.fail_json(msg="Request to {} failed: {}".format(post_url, str(e)))
+
+ status = info.get("status", 0)
+
+ if not status == 200:
+ body = response.read() if response else b""
+ module.fail_json(
+ msg="Failed to {} credential".format(
+ "add/update" if state == "present" else "delete"
+ ),
+ details=body.decode("utf-8", errors="ignore"),
+ )
+
+ if type == "token":
+ response_data = json.loads(response.read())
+ result["token"] = response_data["data"]["tokenValue"]
+ result["token_uuid"] = response_data["data"]["tokenUuid"]
+
+ result["changed"] = True
+ result["msg"] = response.read().decode("utf-8")
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ run_module()
diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py
index e8301041f2..8362a40255 100644
--- a/plugins/modules/jenkins_job.py
+++ b/plugins/modules/jenkins_job.py
@@ -8,12 +8,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jenkins_job
short_description: Manage jenkins jobs
description:
- - Manage Jenkins jobs by using Jenkins REST API.
+ - Manage Jenkins jobs by using Jenkins REST API.
requirements:
- "python-jenkins >= 0.4.12"
author: "Sergio Millan Rodriguez (@sermilrod)"
@@ -28,7 +27,7 @@ options:
config:
type: str
description:
- - config in XML format.
+ - Config in XML format.
- Required if job does not yet exist.
- Mutually exclusive with O(enabled).
- Considered if O(state=present).
@@ -71,20 +70,19 @@ options:
user:
type: str
description:
- - User to authenticate with the Jenkins server.
+ - User to authenticate with the Jenkins server.
required: false
validate_certs:
type: bool
default: true
description:
- - If set to V(false), the SSL certificates will not be validated.
- This should only set to V(false) used on personally controlled sites
- using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only set to V(false) used on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
- The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY).
version_added: 2.3.0
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a jenkins job using basic authentication
community.general.jenkins_job:
config: "{{ lookup('file', 'templates/test.xml') }}"
@@ -132,10 +130,9 @@ EXAMPLES = '''
enabled: false
url: http://localhost:8080
user: admin
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
name:
description: Name of the jenkins job.
returned: success
@@ -157,11 +154,11 @@ user:
type: str
sample: admin
url:
- description: Url to connect to the Jenkins server.
+ description: URL to connect to the Jenkins server.
returned: success
type: str
sample: https://jenkins.mydomain.com
-'''
+"""
import os
import traceback
@@ -353,14 +350,14 @@ def job_config_to_string(xml_str):
def main():
module = AnsibleModule(
argument_spec=dict(
- config=dict(type='str', required=False),
+ config=dict(type='str'),
name=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
- enabled=dict(required=False, type='bool'),
- token=dict(type='str', required=False, no_log=True),
- url=dict(type='str', required=False, default="http://localhost:8080"),
- user=dict(type='str', required=False),
+ password=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], default="present"),
+ enabled=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
validate_certs=dict(type='bool', default=True),
),
mutually_exclusive=[
diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py
index 40e1d7aea3..37d9af3f56 100644
--- a/plugins/modules/jenkins_job_info.py
+++ b/plugins/modules/jenkins_job_info.py
@@ -9,8 +9,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jenkins_job_info
short_description: Get information about Jenkins jobs
description:
@@ -51,18 +50,18 @@ options:
user:
type: str
description:
- - User to authenticate with the Jenkins server.
+ - User to authenticate with the Jenkins server.
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
- - This should only set to V(false) used on personally controlled sites using self-signed certificates.
+ - If set to V(false), the SSL certificates are not validated.
+ - This should only set to V(false) used on personally controlled sites using self-signed certificates.
default: true
type: bool
author:
- "Chris St. Pierre (@stpierre)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Get all Jenkins jobs anonymously
- community.general.jenkins_job_info:
user: admin
@@ -122,24 +121,23 @@ EXAMPLES = '''
token: 126df5c60d66c66e3b75b11104a16a8a
url: https://jenkins.example.com
register: my_jenkins_job_info
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
jobs:
- description: All jobs found matching the specified criteria
+ description: All jobs found matching the specified criteria.
returned: success
type: list
sample:
[
- {
- "name": "test-job",
- "fullname": "test-folder/test-job",
- "url": "http://localhost:8080/job/test-job/",
- "color": "blue"
- },
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ }
]
-'''
+"""
import ssl
import fnmatch
@@ -212,8 +210,8 @@ def get_jobs(module):
jobs = all_jobs
# python-jenkins includes the internal Jenkins class used for each job
# in its return value; we strip that out because the leading underscore
- # (and the fact that it's not documented in the python-jenkins docs)
- # indicates that it's not part of the dependable public interface.
+ # (and the fact that it is not documented in the python-jenkins docs)
+ # indicates that it is not part of the dependable public interface.
for job in jobs:
if "_class" in job:
del job["_class"]
diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py
new file mode 100644
index 0000000000..aa75100168
--- /dev/null
+++ b/plugins/modules/jenkins_node.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: jenkins_node
+short_description: Manage Jenkins nodes
+version_added: 10.0.0
+description:
+ - Manage Jenkins nodes with Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author:
+ - Connor Newton (@phyrwork)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - Check mode is unable to show configuration changes for a node that is not yet present.
+ diff_mode:
+ support: none
+options:
+ url:
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ type: str
+ name:
+ description:
+ - Name of the Jenkins node to manage.
+ required: true
+ type: str
+ user:
+ description:
+ - User to authenticate with the Jenkins server.
+ type: str
+ token:
+ description:
+ - API token to authenticate with the Jenkins server.
+ type: str
+ state:
+ description:
+ - Specifies whether the Jenkins node should be V(present) (created), V(absent) (deleted), V(enabled) (online) or V(disabled)
+ (offline).
+ default: present
+ choices: ['enabled', 'disabled', 'present', 'absent']
+ type: str
+ num_executors:
+ description:
+ - When specified, sets the Jenkins node executor count.
+ type: int
+ labels:
+ description:
+ - When specified, sets the Jenkins node labels.
+ type: list
+ elements: str
+ offline_message:
+ description:
+ - Specifies the offline reason message to be set when configuring the Jenkins node state.
+ - If O(offline_message) is given and requested O(state) is not V(disabled), an error is raised.
+ - Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already
+ offline (current state V(disabled)) is not possible. In this case, a warning is issued.
+ type: str
+ version_added: 10.0.0
+"""
+
+EXAMPLES = r"""
+- name: Create a Jenkins node using token authentication
+ community.general.jenkins_node:
+ url: http://localhost:8080
+ user: jenkins
+ token: 11eb751baabb66c4d1cb8dc4e0fb142cde
+ name: my-node
+ state: present
+
+- name: Set number of executors on Jenkins node
+ community.general.jenkins_node:
+ name: my-node
+ state: present
+ num_executors: 4
+
+- name: Set labels on Jenkins node
+ community.general.jenkins_node:
+ name: my-node
+ state: present
+ labels:
+ - label-1
+ - label-2
+ - label-3
+
+- name: Set Jenkins node offline with offline message.
+ community.general.jenkins_node:
+ name: my-node
+ state: disabled
+ offline_message: >-
+ This node is offline for some reason.
+"""
+
+RETURN = r"""
+url:
+ description: URL used to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: jenkins
+name:
+ description: Name of the Jenkins node.
+ returned: success
+ type: str
+ sample: my-node
+state:
+ description: State of the Jenkins node.
+ returned: success
+ type: str
+ sample: present
+created:
+ description: Whether or not the Jenkins node was created by the task.
+ returned: success
+ type: bool
+deleted:
+ description: Whether or not the Jenkins node was deleted by the task.
+ returned: success
+ type: bool
+disabled:
+ description: Whether or not the Jenkins node was disabled by the task.
+ returned: success
+ type: bool
+enabled:
+ description: Whether or not the Jenkins node was enabled by the task.
+ returned: success
+ type: bool
+configured:
+ description: Whether or not the Jenkins node was configured by the task.
+ returned: success
+ type: bool
+"""
+
+import sys
+import traceback
+from xml.etree import ElementTree as et
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils import deps
+
+with deps.declare(
+ "python-jenkins",
+ reason="python-jenkins is required to interact with Jenkins",
+ url="https://opendev.org/jjb/python-jenkins",
+):
+ import jenkins
+
+
+IS_PYTHON_2 = sys.version_info[0] <= 2
+
+
+class JenkinsNode:
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.state = module.params['state']
+ self.token = module.params['token']
+ self.user = module.params['user']
+ self.url = module.params['url']
+ self.num_executors = module.params['num_executors']
+ self.labels = module.params['labels']
+ self.offline_message = module.params['offline_message'] # type: str | None
+
+ if self.offline_message is not None:
+ self.offline_message = self.offline_message.strip()
+
+ if self.state != "disabled":
+ self.module.fail_json("can not set offline message when state is not disabled")
+
+ if self.labels is not None:
+ for label in self.labels:
+ if " " in label:
+ self.module.fail_json("labels must not contain spaces: got invalid label {}".format(label))
+
+ self.instance = self.get_jenkins_instance()
+ self.result = {
+ 'changed': False,
+ 'url': self.url,
+ 'user': self.user,
+ 'name': self.name,
+ 'state': self.state,
+ 'created': False,
+ 'deleted': False,
+ 'disabled': False,
+ 'enabled': False,
+ 'configured': False,
+ 'warnings': [],
+ }
+
+ def get_jenkins_instance(self):
+ try:
+ if self.user and self.token:
+ return jenkins.Jenkins(self.url, self.user, self.token)
+ elif self.user and not self.token:
+ return jenkins.Jenkins(self.url, self.user)
+ else:
+ return jenkins.Jenkins(self.url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e))
+
+ def configure_node(self, present):
+ if not present:
+ # Node would only not be present if in check mode and if not present there
+ # is no way to know what would and would not be changed.
+ if not self.module.check_mode:
+ raise Exception("configure_node present is False outside of check mode")
+ return
+
+ configured = False
+
+ data = self.instance.get_node_config(self.name)
+ root = et.fromstring(data)
+
+ if self.num_executors is not None:
+ elem = root.find('numExecutors')
+ if elem is None:
+ elem = et.SubElement(root, 'numExecutors')
+ if elem.text is None or int(elem.text) != self.num_executors:
+ elem.text = str(self.num_executors)
+ configured = True
+
+ if self.labels is not None:
+ elem = root.find('label')
+ if elem is None:
+ elem = et.SubElement(root, 'label')
+ labels = []
+ if elem.text:
+ labels = elem.text.split()
+ if labels != self.labels:
+ elem.text = " ".join(self.labels)
+ configured = True
+
+ if configured:
+ if IS_PYTHON_2:
+ data = et.tostring(root)
+ else:
+ data = et.tostring(root, encoding="unicode")
+
+ self.instance.reconfig_node(self.name, data)
+
+ self.result['configured'] = configured
+ if configured:
+ self.result['changed'] = True
+
+ def present_node(self, configure=True): # type: (bool) -> bool
+ """Assert node present.
+
+ Args:
+ configure: If True, run node configuration after asserting node present.
+
+ Returns:
+ True if the node is present, False otherwise (i.e. is check mode).
+ """
+ def create_node():
+ try:
+ self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH)
+ except jenkins.JenkinsException as e:
+ # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+ # handling redirects returned when posting to resources. If the node is
+ # created OK then can ignore the error.
+ if not self.instance.node_exists(self.name):
+ self.module.fail_json(msg="Create node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ # TODO: Remove authorization workaround.
+ self.result['warnings'].append(
+ "suppressed 401 Not Authorized on redirect after node created: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+ )
+
+ present = self.instance.node_exists(self.name)
+ created = False
+ if not present:
+ if not self.module.check_mode:
+ create_node()
+ present = True
+
+ created = True
+
+ if configure:
+ self.configure_node(present)
+
+ self.result['created'] = created
+ if created:
+ self.result['changed'] = True
+
+ return present # Used to gate downstream queries when in check mode.
+
+ def absent_node(self):
+ def delete_node():
+ try:
+ self.instance.delete_node(self.name)
+ except jenkins.JenkinsException as e:
+ # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+ # handling redirects returned when posting to resources. If the node is
+ # deleted OK then can ignore the error.
+ if self.instance.node_exists(self.name):
+ self.module.fail_json(msg="Delete node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ # TODO: Remove authorization workaround.
+ self.result['warnings'].append(
+ "suppressed 401 Not Authorized on redirect after node deleted: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+ )
+
+ present = self.instance.node_exists(self.name)
+ deleted = False
+ if present:
+ if not self.module.check_mode:
+ delete_node()
+
+ deleted = True
+
+ self.result['deleted'] = deleted
+ if deleted:
+ self.result['changed'] = True
+
+ def enabled_node(self):
+ def get_offline(): # type: () -> bool
+ return self.instance.get_node_info(self.name)["offline"]
+
+ present = self.present_node()
+
+ enabled = False
+
+ if present:
+ def enable_node():
+ try:
+ self.instance.enable_node(self.name)
+ except jenkins.JenkinsException as e:
+ # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+ # handling redirects returned when posting to resources. If the node is
+ # disabled OK then can ignore the error.
+ offline = get_offline()
+
+ if offline:
+ self.module.fail_json(msg="Enable node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ # TODO: Remove authorization workaround.
+ self.result['warnings'].append(
+ "suppressed 401 Not Authorized on redirect after node enabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+ )
+
+ offline = get_offline()
+
+ if offline:
+ if not self.module.check_mode:
+ enable_node()
+
+ enabled = True
+ else:
+ # Would have created node with initial state enabled therefore would not have
+ # needed to enable therefore not enabled.
+ if not self.module.check_mode:
+ raise Exception("enabled_node present is False outside of check mode")
+ enabled = False
+
+ self.result['enabled'] = enabled
+ if enabled:
+ self.result['changed'] = True
+
+ def disabled_node(self):
+ def get_offline_info():
+ info = self.instance.get_node_info(self.name)
+
+ offline = info["offline"]
+ offline_message = info["offlineCauseReason"]
+
+ return offline, offline_message
+
+ # Don't configure until after disabled, in case the change in configuration
+ # causes the node to pick up a job.
+ present = self.present_node(False)
+
+ disabled = False
+ changed = False
+
+ if present:
+ offline, offline_message = get_offline_info()
+
+ if self.offline_message is not None and self.offline_message != offline_message:
+ if offline:
+ # n.b. Internally disable_node uses toggleOffline gated by a not
+ # offline condition. This means that disable_node can not be used to
+ # update an offline message if the node is already offline.
+ #
+ # Toggling the node online to set the message when toggling offline
+ # again is not an option as during this transient online time jobs
+ # may be scheduled on the node which is not acceptable.
+ self.result["warnings"].append(
+ "unable to change offline message when already offline"
+ )
+ else:
+ offline_message = self.offline_message
+ changed = True
+
+ def disable_node():
+ try:
+ self.instance.disable_node(self.name, offline_message)
+ except jenkins.JenkinsException as e:
+ # Some versions of python-jenkins < 1.8.3 have an authorization bug when
+ # handling redirects returned when posting to resources. If the node is
+ # disabled OK then can ignore the error.
+ offline, _offline_message = get_offline_info()
+
+ if not offline:
+ self.module.fail_json(msg="Disable node failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ # TODO: Remove authorization workaround.
+ self.result['warnings'].append(
+ "suppressed 401 Not Authorized on redirect after node disabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707"
+ )
+
+ if not offline:
+ if not self.module.check_mode:
+ disable_node()
+
+ disabled = True
+
+ else:
+ # Would have created node with initial state enabled therefore would have
+ # needed to disable therefore disabled.
+ if not self.module.check_mode:
+ raise Exception("disabled_node present is False outside of check mode")
+ disabled = True
+
+ if disabled:
+ changed = True
+
+ self.result['disabled'] = disabled
+
+ if changed:
+ self.result['changed'] = True
+
+ self.configure_node(present)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ url=dict(default='http://localhost:8080'),
+ user=dict(),
+ token=dict(no_log=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'),
+ num_executors=dict(type='int'),
+ labels=dict(type='list', elements='str'),
+ offline_message=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ deps.validate(module)
+
+ jenkins_node = JenkinsNode(module)
+
+ state = module.params.get('state')
+ if state == 'enabled':
+ jenkins_node.enabled_node()
+ elif state == 'disabled':
+ jenkins_node.disabled_node()
+ elif state == 'present':
+ jenkins_node.present_node()
+ else:
+ jenkins_node.absent_node()
+
+ module.exit_json(**jenkins_node.result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py
index 8834e0a2b2..f47dcfe92f 100644
--- a/plugins/modules/jenkins_plugin.py
+++ b/plugins/modules/jenkins_plugin.py
@@ -9,14 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
-
attributes:
check_mode:
support: full
@@ -53,8 +51,7 @@ options:
type: str
description:
- Desired plugin state.
- - If set to V(latest), the check for new version will be performed
- every time. This is suitable to keep the plugin up-to-date.
+ - If set to V(latest), the check for new version is performed every time. This is suitable to keep the plugin up-to-date.
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
timeout:
@@ -65,12 +62,10 @@ options:
updates_expiration:
type: int
description:
- - Number of seconds after which a new copy of the C(update-center.json)
- file is downloaded. This is used to avoid the need to download the
- plugin to calculate its checksum when O(state=latest) is specified.
- - Set it to V(0) if no cache file should be used. In that case, the
- plugin file will always be downloaded to calculate its checksum when
- O(state=latest) is specified.
+ - Number of seconds after which a new copy of the C(update-center.json) file is downloaded. This is used to avoid the
+ need to download the plugin to calculate its checksum when O(state=latest) is specified.
+ - Set it to V(0) if no cache file should be used. In that case, the plugin file is always downloaded to calculate its
+ checksum when O(state=latest) is specified.
default: 86400
updates_url:
type: list
@@ -83,7 +78,7 @@ options:
type: list
elements: str
description:
- - A list of URL segment(s) to retrieve the update center json file from.
+ - A list of URL segment(s) to retrieve the update center JSON file from.
default: ['update-center.json', 'updates/update-center.json']
version_added: 3.3.0
latest_plugins_url_segments:
@@ -109,12 +104,11 @@ options:
type: str
description:
- Plugin version number.
- - If this option is specified, all plugin dependencies must be installed
- manually.
- - It might take longer to verify that the correct version is installed.
- This is especially true if a specific version number is specified.
- - Quote the version to prevent the value to be interpreted as float. For
- example if V(1.20) would be unquoted, it would become V(1.2).
+ - If this option is specified, all plugin dependencies must be installed manually.
+ - It might take longer to verify that the correct version is installed. This is especially true if a specific version
+ number is specified.
+ - Quote the version to prevent the value to be interpreted as float. For example if V(1.20) would be unquoted, it would
+ become V(1.2).
with_dependencies:
description:
- Defines whether to install plugin dependencies.
@@ -123,24 +117,20 @@ options:
default: true
notes:
- - Plugin installation should be run under root or the same user which owns
- the plugin files on the disk. Only if the plugin is not installed yet and
- no version is specified, the API installation is performed which requires
- only the Web UI credentials.
- - It is necessary to notify the handler or call the M(ansible.builtin.service) module to
- restart the Jenkins service after a new plugin was installed.
- - Pinning works only if the plugin is installed and Jenkins service was
- successfully restarted after the plugin installation.
- - It is not possible to run the module remotely by changing the O(url)
- parameter to point to the Jenkins server. The module must be used on the
- host where Jenkins runs as it needs direct access to the plugin files.
+ - Plugin installation should be run under root or the same user which owns the plugin files on the disk. Only if the plugin
+ is not installed yet and no version is specified, the API installation is performed which requires only the Web UI credentials.
+ - It is necessary to notify the handler or call the M(ansible.builtin.service) module to restart the Jenkins service after
+ a new plugin was installed.
+ - Pinning works only if the plugin is installed and Jenkins service was successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the O(url) parameter to point to the Jenkins server. The module
+ must be used on the host where Jenkins runs as it needs direct access to the plugin files.
extends_documentation_fragment:
- ansible.builtin.url
- ansible.builtin.files
- community.general.attributes
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install plugin
community.general.jenkins_plugin:
name: build-pipeline-plugin
@@ -281,8 +271,8 @@ EXAMPLES = '''
retries: 60
delay: 5
until: >
- 'status' in jenkins_service_status and
- jenkins_service_status['status'] == 200
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
@@ -305,20 +295,20 @@ EXAMPLES = '''
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
plugin:
- description: plugin name
- returned: success
- type: str
- sample: build-pipeline-plugin
+ description: Plugin name.
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
state:
- description: state of the target, after execution
- returned: success
- type: str
- sample: "present"
-'''
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: "present"
+"""
import hashlib
import io
diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py
index 030c8e6fa3..5a00581366 100644
--- a/plugins/modules/jenkins_script.py
+++ b/plugins/modules/jenkins_script.py
@@ -9,17 +9,15 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author: James Hogarth (@hogarthj)
module: jenkins_script
short_description: Executes a groovy script in the jenkins instance
description:
- - The C(jenkins_script) module takes a script plus a dict of values
- to use within the script and returns the result of the script being run.
-
+ - The C(jenkins_script) module takes a script plus a dict of values to use within the script and returns the result of the
+ script being run.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
@@ -31,20 +29,18 @@ options:
script:
type: str
description:
- - The groovy script to be executed.
- This gets passed as a string Template if args is defined.
+ - The groovy script to be executed. This gets passed as a string Template if args is defined.
required: true
url:
type: str
description:
- - The jenkins server to execute the script against. The default is a local
- jenkins instance that is not being proxied through a webserver.
+ - The jenkins server to execute the script against. The default is a local jenkins instance that is not being proxied
+ through a webserver.
default: http://localhost:8080
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
- This should only set to V(false) used on personally controlled sites
- using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only be set to V(false) when used on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: true
user:
@@ -58,21 +54,18 @@ options:
timeout:
type: int
description:
- - The request timeout in seconds
+ - The request timeout in seconds.
default: 10
args:
type: dict
description:
- A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
-
notes:
- - Since the script can do anything this does not report on changes.
- Knowing the script is being run it's important to set changed_when
- for the ansible output to be clear on any alterations made.
+ - Since the script can do anything this does not report on changes. Knowing the script is being run it is important to set
+ C(changed_when) for the ansible output to be clear on any alterations made.
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Obtaining a list of plugins
community.general.jenkins_script:
script: 'println(Jenkins.instance.pluginManager.plugins)'
@@ -82,10 +75,10 @@ EXAMPLES = '''
- name: Setting master using a variable to hold a more complicate script
ansible.builtin.set_fact:
setmaster_mode: |
- import jenkins.model.*
- instance = Jenkins.getInstance()
- instance.setMode(${jenkins_mode})
- instance.save()
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
- name: Use the variable as the script
community.general.jenkins_script:
@@ -99,16 +92,16 @@ EXAMPLES = '''
user: admin
password: admin
url: https://localhost
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
+RETURN = r"""
output:
- description: Result of script
- returned: success
- type: str
- sample: 'Result: true'
-'''
+ description: Result of script.
+ returned: success
+ type: str
+ sample: 'Result: true'
+"""
import json
@@ -149,12 +142,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
script=dict(required=True, type="str"),
- url=dict(required=False, type="str", default="http://localhost:8080"),
- validate_certs=dict(required=False, type="bool", default=True),
- user=dict(required=False, type="str", default=None),
- password=dict(required=False, no_log=True, type="str", default=None),
- timeout=dict(required=False, type="int", default=10),
- args=dict(required=False, type="dict", default=None)
+ url=dict(type="str", default="http://localhost:8080"),
+ validate_certs=dict(type="bool", default=True),
+ user=dict(type="str"),
+ password=dict(no_log=True, type="str"),
+ timeout=dict(type="int", default=10),
+ args=dict(type="dict")
)
)
diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py
index 64aed7e149..f99c252675 100644
--- a/plugins/modules/jira.py
+++ b/plugins/modules/jira.py
@@ -20,7 +20,6 @@ module: jira
short_description: Create and modify issues in a JIRA instance
description:
- Create and modify issues in a JIRA instance.
-
extends_documentation_fragment:
- community.general.attributes
@@ -36,90 +35,95 @@ options:
required: true
description:
- Base URI for the JIRA instance.
-
operation:
type: str
required: true
- aliases: [ command ]
- choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ]
+ aliases: [command]
+ choices: [attach, comment, create, edit, fetch, link, search, transition, update, worklog]
description:
- The operation to perform.
- V(worklog) was added in community.general 6.5.0.
-
username:
type: str
description:
- The username to log-in with.
- Must be used with O(password). Mutually exclusive with O(token).
-
password:
type: str
description:
- The password to log-in with.
- - Must be used with O(username). Mutually exclusive with O(token).
-
+ - Must be used with O(username). Mutually exclusive with O(token).
token:
type: str
description:
- The personal access token to log-in with.
- Mutually exclusive with O(username) and O(password).
version_added: 4.2.0
+ client_cert:
+ type: path
+ description:
+ - Client certificate if required.
+ - In addition to O(username) and O(password) or O(token). Not mutually exclusive.
+ version_added: 10.4.0
+ client_key:
+ type: path
+ description:
+ - Client certificate key if required.
+ - In addition to O(username) and O(password) or O(token). Not mutually exclusive.
+ version_added: 10.4.0
project:
type: str
required: false
description:
- The project for this operation. Required for issue creation.
-
summary:
type: str
required: false
description:
- - The issue summary, where appropriate.
- - Note that JIRA may not allow changing field values on specific transitions or states.
-
+ - The issue summary, where appropriate.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
description:
type: str
required: false
description:
- - The issue description, where appropriate.
- - Note that JIRA may not allow changing field values on specific transitions or states.
-
+ - The issue description, where appropriate.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
issuetype:
type: str
required: false
description:
- - The issue type, for issue creation.
-
+ - The issue type, for issue creation.
issue:
type: str
required: false
description:
- - An existing issue key to operate on.
+ - An existing issue key to operate on.
aliases: ['ticket']
comment:
type: str
required: false
description:
- - The comment text to add.
- - Note that JIRA may not allow changing field values on specific transitions or states.
-
+ - The comment text to add.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
comment_visibility:
type: dict
description:
- - Used to specify comment comment visibility.
- - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details.
+ - Used to specify comment visibility.
+ - See
+ U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post)
+ for details.
suboptions:
type:
description:
- - Use type to specify which of the JIRA visibility restriction types will be used.
+ - Use O(comment_visibility.type) to specify which of the JIRA visibility restriction types is used.
type: str
required: true
choices: [group, role]
value:
description:
- - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role.
+ - Specify value corresponding to the type of visibility restriction. For example name of the group or role.
type: str
required: true
version_added: '3.2.0'
@@ -128,63 +132,67 @@ options:
type: str
required: false
description:
- - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name.
-
+ - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name.
+ - This is mutually exclusive with O(status_id).
+ status_id:
+ type: str
+ required: false
+ description:
+ - Only used when O(operation) is V(transition), and refers to the transition ID.
+ - This is mutually exclusive with O(status).
+ version_added: 10.3.0
assignee:
type: str
required: false
description:
- - Sets the the assignee when O(operation) is V(create), V(transition), or V(edit).
- - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead.
- - Note that JIRA may not allow changing field values on specific transitions or states.
-
+ - Sets the assignee when O(operation) is V(create), V(transition), or V(edit).
+ - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
account_id:
type: str
description:
- - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit).
- - Note that JIRA may not allow changing field values on specific transitions or states.
+ - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit).
+ - Note that JIRA may not allow changing field values on specific transitions or states.
version_added: 2.5.0
linktype:
type: str
required: false
description:
- - Set type of link, when action 'link' selected.
-
+ - Set type of link, when action 'link' selected.
inwardissue:
type: str
required: false
description:
- - Set issue from which link will be created.
-
+ - Set issue from which link is created.
outwardissue:
type: str
required: false
description:
- - Set issue to which link will be created.
-
+ - Set issue to which link is created.
fields:
type: dict
required: false
description:
- - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
- (possibly after merging with other required data, as when passed to create). See examples for more information,
- and the JIRA REST API for the structure required for various fields.
- - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example.
- - Note that JIRA may not allow changing field values on specific transitions or states.
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly
+ after merging with other required data, as when passed to create). See examples for more information, and the JIRA
+ REST API for the structure required for various fields.
+ - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add
+ JIRA properties for example.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
default: {}
jql:
required: false
description:
- - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ - Query JIRA in JQL Syntax, for example V("CMDB Hostname" = test.example.com).
type: str
version_added: '0.2.0'
maxresults:
required: false
description:
- - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used.
- - Used when O(operation=search) only, ignored otherwise.
+ - Limit the result of O(operation=search). If no value is specified, the default JIRA limit is used.
+ - Used when O(operation=search) only, ignored otherwise.
type: int
version_added: '0.2.0'
@@ -198,7 +206,7 @@ options:
validate_certs:
required: false
description:
- - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates)
+ - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates).
default: true
type: bool
@@ -212,27 +220,24 @@ options:
required: true
type: path
description:
- - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified,
- the filename to use for the attachment.
+ - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified, the filename
+ to use for the attachment.
content:
type: str
description:
- - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will be
+ - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) is
used instead.
mimetype:
type: str
description:
- - The MIME type to supply for the upload. If not specified, best-effort detection will be
- done.
-
+ - The MIME type to supply for the upload. If not specified, best-effort detection is performed.
notes:
- - "Currently this only works with basic-auth, or tokens."
- - "To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password)."
-
+ - Currently this only works with basic-auth, or tokens.
+ - To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password).
author:
-- "Steve Smith (@tarka)"
-- "Per Abildgaard Toft (@pertoft)"
-- "Brandon McNama (@DWSR)"
+ - "Steve Smith (@tarka)"
+ - "Per Abildgaard Toft (@pertoft)"
+ - "Brandon McNama (@DWSR)"
"""
EXAMPLES = r"""
@@ -249,8 +254,8 @@ EXAMPLES = r"""
issuetype: Task
args:
fields:
- customfield_13225: "test"
- customfield_12931: {"value": "Test"}
+ customfield_13225: "test"
+ customfield_12931: {"value": "Test"}
register: issue
- name: Comment on issue
@@ -362,9 +367,9 @@ EXAMPLES = r"""
operation: edit
args:
fields:
- labels:
- - autocreated
- - ansible
+ labels:
+ - autocreated
+ - ansible
# Updating a field using operations: add, set & remove
- name: Change the value of a Select dropdown
@@ -376,8 +381,8 @@ EXAMPLES = r"""
operation: update
args:
fields:
- customfield_12931: [ {'set': {'value': 'Virtual'}} ]
- customfield_13820: [ {'set': {'value':'Manually'}} ]
+ customfield_12931: ['set': {'value': 'Virtual'}]
+ customfield_13820: ['set': {'value': 'Manually'}]
register: cmdb_issue
delegate_to: localhost
@@ -406,7 +411,7 @@ EXAMPLES = r"""
jql: project=cmdb AND cf[13225]="test"
args:
fields:
- lastViewed: null
+ lastViewed:
register: issue
- name: Create a unix account for the reporter
@@ -452,6 +457,23 @@ EXAMPLES = r"""
operation: attach
attachment:
filename: topsecretreport.xlsx
+
+# Use username, password and client certificate authentication
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ client_cert: '/path/to/client-cert'
+ client_key: '/path/to/client-key'
+
+# Use token and client certificate authentication
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ token: '{{ token }}'
+ client_cert: '/path/to/client-cert'
+ client_key: '/path/to/client-key'
"""
import base64
@@ -486,6 +508,8 @@ class JIRA(StateModuleHelper):
username=dict(type='str'),
password=dict(type='str', no_log=True),
token=dict(type='str', no_log=True),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
project=dict(type='str', ),
summary=dict(type='str', ),
description=dict(type='str', ),
@@ -497,6 +521,7 @@ class JIRA(StateModuleHelper):
value=dict(type='str', required=True)
)),
status=dict(type='str', ),
+ status_id=dict(type='str', ),
assignee=dict(type='str', ),
fields=dict(default={}, type='dict'),
linktype=dict(type='str', ),
@@ -512,9 +537,11 @@ class JIRA(StateModuleHelper):
['username', 'token'],
['password', 'token'],
['assignee', 'account_id'],
+ ['status', 'status_id']
],
required_together=[
['username', 'password'],
+ ['client_cert', 'client_key']
],
required_one_of=[
['username', 'token'],
@@ -525,13 +552,13 @@ class JIRA(StateModuleHelper):
('operation', 'comment', ['issue', 'comment']),
('operation', 'workflow', ['issue', 'comment']),
('operation', 'fetch', ['issue']),
- ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'transition', ['issue']),
+ ('operation', 'transition', ['status', 'status_id'], True),
('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
('operation', 'search', ['jql']),
),
supports_check_mode=False
)
- use_old_vardict = False
state_param = 'operation'
def __init_module__(self):
@@ -630,14 +657,27 @@ class JIRA(StateModuleHelper):
turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
tmeta = self.get(turl)
- target = self.vars.status
tid = None
+ target = None
+
+ if self.vars.status is not None:
+ target = self.vars.status.strip()
+ elif self.vars.status_id is not None:
+ tid = self.vars.status_id.strip()
+
for t in tmeta['transitions']:
- if t['name'] == target:
- tid = t['id']
- break
+ if target is not None:
+ if t['name'] == target:
+ tid = t['id']
+ break
+ else:
+ if tid == t['id']:
+ break
else:
- raise ValueError("Failed find valid transition for '%s'" % target)
+ if target is not None:
+ raise ValueError("Failed to find valid transition for '%s'" % target)
+ else:
+ raise ValueError("Failed to find valid transition for ID '%s'" % tid)
fields = dict(self.vars.fields)
if self.vars.summary is not None:
diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py
index 6cd546e090..ac542d04e8 100644
--- a/plugins/modules/kdeconfig.py
+++ b/plugins/modules/kdeconfig.py
@@ -7,25 +7,22 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: kdeconfig
short_description: Manage KDE configuration files
version_added: "6.5.0"
description:
- Add or change individual settings in KDE configuration files.
- It uses B(kwriteconfig) under the hood.
-
options:
path:
description:
- - Path to the config file. If the file does not exist it will be created.
+ - Path to the config file. If the file does not exist it is created.
type: path
required: true
kwriteconfig_path:
description:
- - Path to the kwriteconfig executable. If not specified, Ansible will try
- to discover it.
+ - Path to the kwriteconfig executable. If not specified, Ansible tries to discover it.
type: path
values:
description:
@@ -74,9 +71,9 @@ requirements:
- kwriteconfig
author:
- Salvatore Mesoraca (@smeso)
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding"
community.general.kdeconfig:
path: /etc/xdg/kickoffrc
@@ -97,9 +94,9 @@ EXAMPLES = r'''
key: KEY
value: VALUE
backup: true
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
import os
import shutil
diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py
index 224b5bba8c..e1cf3fddb5 100644
--- a/plugins/modules/kernel_blacklist.py
+++ b/plugins/modules/kernel_blacklist.py
@@ -9,47 +9,45 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: kernel_blacklist
author:
- - Matthias Vogelgesang (@matze)
+ - Matthias Vogelgesang (@matze)
short_description: Blacklist kernel modules
description:
- - Add or remove kernel modules from blacklist.
+ - Add or remove kernel modules from blacklist.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
options:
- name:
- type: str
- description:
- - Name of kernel module to black- or whitelist.
- required: true
- state:
- type: str
- description:
- - Whether the module should be present in the blacklist or absent.
- choices: [ absent, present ]
- default: present
- blacklist_file:
- type: str
- description:
- - If specified, use this blacklist file instead of
- C(/etc/modprobe.d/blacklist-ansible.conf).
- default: /etc/modprobe.d/blacklist-ansible.conf
-'''
+ name:
+ type: str
+ description:
+ - Name of kernel module to black- or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [absent, present]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of C(/etc/modprobe.d/blacklist-ansible.conf).
+ default: /etc/modprobe.d/blacklist-ansible.conf
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Blacklist the nouveau driver module
community.general.kernel_blacklist:
name: nouveau
state: present
-'''
+"""
import os
import re
@@ -67,7 +65,6 @@ class Blacklist(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py
index 6ee7bc8186..ae6d24958c 100644
--- a/plugins/modules/keycloak_authentication.py
+++ b/plugins/modules/keycloak_authentication.py
@@ -7,109 +7,109 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authentication
short_description: Configure authentication in Keycloak
description:
- - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it.
- - It can also delete the flow.
-
+ - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it.
+ - It can also delete the flow.
version_added: "3.3.0"
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- realm:
- description:
- - The name of the realm in which is the authentication.
- required: true
- type: str
- alias:
- description:
- - Alias for the authentication flow.
- required: true
- type: str
+ realm:
description:
+ - The name of the realm that contains the authentication flow.
+ required: true
+ type: str
+ alias:
+ description:
+ - Alias for the authentication flow.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the flow.
+ type: str
+ providerId:
+ description:
+ - C(providerId) for the new flow when not copied from an existing flow.
+ choices: ["basic-flow", "client-flow"]
+ type: str
+ copyFrom:
+ description:
+ - C(flowAlias) of the authentication flow to use for the copy.
+ type: str
+ authenticationExecutions:
+ description:
+ - Configuration structure for the executions.
+ type: list
+ elements: dict
+ suboptions:
+ providerId:
description:
- - Description of the flow.
+ - C(providerID) for the new flow when not copied from an existing flow.
type: str
- providerId:
+ displayName:
description:
- - C(providerId) for the new flow when not copied from an existing flow.
- choices: [ "basic-flow", "client-flow" ]
+ - Name of the execution or subflow to create or update.
type: str
- copyFrom:
+ requirement:
description:
- - C(flowAlias) of the authentication flow to use for the copy.
+ - Control status of the subflow or execution.
+ choices: ["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"]
type: str
- authenticationExecutions:
+ flowAlias:
description:
- - Configuration structure for the executions.
- type: list
- elements: dict
- suboptions:
- providerId:
- description:
- - C(providerID) for the new flow when not copied from an existing flow.
- type: str
- displayName:
- description:
- - Name of the execution or subflow to create or update.
- type: str
- requirement:
- description:
- - Control status of the subflow or execution.
- choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ]
- type: str
- flowAlias:
- description:
- - Alias of parent flow.
- type: str
- authenticationConfig:
- description:
- - Describe the config of the authentication.
- type: dict
- index:
- description:
- - Priority order of the execution.
- type: int
- subFlowType:
- description:
- - For new subflows, optionally specify the type.
- - Is only used at creation.
- choices: ["basic-flow", "form-flow"]
- default: "basic-flow"
- type: str
- version_added: 6.6.0
- state:
- description:
- - Control if the authentication flow must exists or not.
- choices: [ "present", "absent" ]
- default: present
+ - Alias of parent flow.
type: str
- force:
- type: bool
- default: false
+ authenticationConfig:
description:
- - If V(true), allows to remove the authentication flow and recreate it.
-
+ - Describe the config of the authentication.
+ type: dict
+ index:
+ description:
+ - Priority order of the execution.
+ type: int
+ subFlowType:
+ description:
+ - For new subflows, optionally specify the type.
+ - Is only used at creation.
+ choices: ["basic-flow", "form-flow"]
+ default: "basic-flow"
+ type: str
+ version_added: 6.6.0
+ state:
+ description:
+ - Control if the authentication flow must exist or not.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ force:
+ type: bool
+ default: false
+ description:
+ - If V(true), allows to remove the authentication flow and recreate it.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Philippe Gauthier (@elfelip)
- - Gaëtan Daubresse (@Gaetan2907)
-'''
+ - Philippe Gauthier (@elfelip)
+ - Gaëtan Daubresse (@Gaetan2907)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create an authentication flow from first broker login and add an execution to it.
community.general.keycloak_authentication:
auth_keycloak_url: http://localhost:8080/auth
@@ -123,15 +123,15 @@ EXAMPLES = '''
- providerId: "test-execution1"
requirement: "REQUIRED"
authenticationConfig:
- alias: "test.execution1.property"
- config:
- test1.property: "value"
+ alias: "test.execution1.property"
+ config:
+ test1.property: "value"
- providerId: "test-execution2"
requirement: "REQUIRED"
authenticationConfig:
- alias: "test.execution2.property"
- config:
- test2.property: "value"
+ alias: "test.execution2.property"
+ config:
+ test2.property: "value"
state: present
- name: Re-create the authentication flow
@@ -147,9 +147,9 @@ EXAMPLES = '''
- providerId: "test-provisioning"
requirement: "REQUIRED"
authenticationConfig:
- alias: "test.provisioning.property"
- config:
- test.provisioning.property: "value"
+ alias: "test.provisioning.property"
+ config:
+ test.provisioning.property: "value"
state: present
force: true
@@ -181,26 +181,29 @@ EXAMPLES = '''
realm: master
alias: "Copy of first broker login"
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the authentication after module execution.
- returned: on success
- type: dict
- sample: {
+ description: Representation of the authentication after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
"alias": "Copy of first broker login",
"authenticationExecutions": [
{
"alias": "review profile config",
"authenticationConfig": {
"alias": "review profile config",
- "config": { "update.profile.on.first.login": "missing" },
+ "config": {
+ "update.profile.on.first.login": "missing"
+ },
"id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
},
"configurable": true,
@@ -210,7 +213,11 @@ end_state:
"level": 0,
"providerId": "idp-review-profile",
"requirement": "REQUIRED",
- "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
+ "requirementChoices": [
+ "REQUIRED",
+ "ALTERNATIVE",
+ "DISABLED"
+ ]
}
],
"builtIn": false,
@@ -219,7 +226,7 @@ end_state:
"providerId": "basic-flow",
"topLevel": true
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \
import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included
@@ -246,7 +253,7 @@ def create_or_update_executions(kc, config, realm='master'):
"""
Create or update executions for an authentication flow.
:param kc: Keycloak API access.
- :param config: Representation of the authentication flow including it's executions.
+ :param config: Representation of the authentication flow including its executions.
:param realm: Realm
:return: tuple (changed, dict(before, after)
WHERE
@@ -360,8 +367,9 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']])
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
)
result = dict(changed=False, msg='', flow={})
diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py
index 5ffbd2033c..69183ce605 100644
--- a/plugins/modules/keycloak_authentication_required_actions.py
+++ b/plugins/modules/keycloak_authentication_required_actions.py
@@ -9,81 +9,82 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authentication_required_actions
short_description: Allows administration of Keycloak authentication required actions
description:
- - This module can register, update and delete required actions.
- - It also filters out any duplicate required actions by their alias. The first occurrence is preserved.
-
+ - This module can register, update and delete required actions.
+ - It also filters out any duplicate required actions by their alias. The first occurrence is preserved.
version_added: 7.1.0
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- realm:
+ realm:
+ description:
+ - The name of the realm that contains the authentication required actions.
+ required: true
+ type: str
+ required_actions:
+ elements: dict
+ description:
+ - Authentication required action.
+ suboptions:
+ alias:
description:
- - The name of the realm in which are the authentication required actions.
+ - Unique name of the required action.
required: true
type: str
- required_actions:
- elements: dict
+ config:
description:
- - Authentication required action.
- suboptions:
- alias:
- description:
- - Unique name of the required action.
- required: true
- type: str
- config:
- description:
- - Configuration for the required action.
- type: dict
- defaultAction:
- description:
- - Indicates, if any new user will have the required action assigned to it.
- type: bool
- enabled:
- description:
- - Indicates, if the required action is enabled or not.
- type: bool
- name:
- description:
- - Displayed name of the required action. Required for registration.
- type: str
- priority:
- description:
- - Priority of the required action.
- type: int
- providerId:
- description:
- - Provider ID of the required action. Required for registration.
- type: str
- type: list
- state:
- choices: [ "absent", "present" ]
+ - Configuration for the required action.
+ type: dict
+ defaultAction:
description:
- - Control if the realm authentication required actions are going to be registered/updated (V(present)) or deleted (V(absent)).
- required: true
+ - Indicates whether new users have the required action assigned to them.
+ type: bool
+ enabled:
+ description:
+ - Indicates whether the required action is enabled.
+ type: bool
+ name:
+ description:
+ - Displayed name of the required action. Required for registration.
type: str
+ priority:
+ description:
+ - Priority of the required action.
+ type: int
+ providerId:
+ description:
+ - Provider ID of the required action. Required for registration.
+ type: str
+ type: list
+ state:
+ choices: ["absent", "present"]
+ description:
+ - Control if the realm authentication required actions are going to be registered/updated (V(present)) or deleted (V(absent)).
+ required: true
+ type: str
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Skrekulko (@Skrekulko)
-'''
+ - Skrekulko (@Skrekulko)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Register a new required action.
community.general.keycloak_authentication_required_actions:
auth_client_id: "admin-cli"
@@ -123,56 +124,55 @@ EXAMPLES = '''
required_action:
- alias: "TERMS_AND_CONDITIONS"
state: "absent"
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the authentication required actions after module execution.
- returned: on success
- type: complex
- contains:
- alias:
- description:
- - Unique name of the required action.
- sample: test-provider-id
- type: str
- config:
- description:
- - Configuration for the required action.
- sample: {}
- type: dict
- defaultAction:
- description:
- - Indicates, if any new user will have the required action assigned to it.
- sample: false
- type: bool
- enabled:
- description:
- - Indicates, if the required action is enabled or not.
- sample: false
- type: bool
- name:
- description:
- - Displayed name of the required action. Required for registration.
- sample: Test provider ID
- type: str
- priority:
- description:
- - Priority of the required action.
- sample: 90
- type: int
- providerId:
- description:
- - Provider ID of the required action. Required for registration.
- sample: test-provider-id
- type: str
-
-'''
+ description: Representation of the authentication required actions after module execution.
+ returned: on success
+ type: complex
+ contains:
+ alias:
+ description:
+ - Unique name of the required action.
+ sample: test-provider-id
+ type: str
+ config:
+ description:
+ - Configuration for the required action.
+ sample: {}
+ type: dict
+ defaultAction:
+ description:
+ - Indicates whether new users have the required action assigned to them.
+ sample: false
+ type: bool
+ enabled:
+ description:
+ - Indicates whether the required action is enabled.
+ sample: false
+ type: bool
+ name:
+ description:
+ - Displayed name of the required action. Required for registration.
+ sample: Test provider ID
+ type: str
+ priority:
+ description:
+ - Priority of the required action.
+ sample: 90
+ type: int
+ providerId:
+ description:
+ - Provider ID of the required action. Required for registration.
+ sample: test-provider-id
+ type: str
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -237,8 +237,9 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']])
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
)
result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py
index 5eef9ac765..78d70c7ee6 100644
--- a/plugins/modules/keycloak_authz_authorization_scope.py
+++ b/plugins/modules/keycloak_authz_authorization_scope.py
@@ -9,78 +9,76 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authz_authorization_scope
-short_description: Allows administration of Keycloak client authorization scopes via Keycloak API
+short_description: Allows administration of Keycloak client authorization scopes using Keycloak API
version_added: 6.6.0
description:
- - This module allows the administration of Keycloak client Authorization Scopes via the Keycloak REST
- API. Authorization Scopes are only available if a client has Authorization enabled.
-
- - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
- being used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
- The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
- U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
-
+ - This module allows the administration of Keycloak client Authorization Scopes using the Keycloak REST API. Authorization
+ Scopes are only available if a client has Authorization enabled.
+ - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+ the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate
+ realm definition with the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services
+ paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the authorization scope.
- - On V(present), the authorization scope will be created (or updated if it exists already).
- - On V(absent), the authorization scope will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the authorization scope to create.
- type: str
- required: true
- display_name:
- description:
- - The display name of the authorization scope.
- type: str
- required: false
- icon_uri:
- description:
- - The icon URI for the authorization scope.
- type: str
- required: false
- client_id:
- description:
- - The C(clientId) of the Keycloak client that should have the authorization scope.
- - This is usually a human-readable name of the Keycloak client.
- type: str
- required: true
- realm:
- description:
- - The name of the Keycloak realm the Keycloak client is in.
- type: str
- required: true
+ state:
+ description:
+ - State of the authorization scope.
+ - On V(present), the authorization scope is created (or updated if it exists already).
+ - On V(absent), the authorization scope is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the authorization scope to create.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The display name of the authorization scope.
+ type: str
+ required: false
+ icon_uri:
+ description:
+ - The icon URI for the authorization scope.
+ type: str
+ required: false
+ client_id:
+ description:
+ - The C(clientId) of the Keycloak client that should have the authorization scope.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Samuli Seppänen (@mattock)
-'''
+ - Samuli Seppänen (@mattock)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Manage Keycloak file:delete authorization scope
keycloak_authz_authorization_scope:
name: file:delete
@@ -92,41 +90,40 @@ EXAMPLES = '''
auth_username: keycloak
auth_password: keycloak
auth_realm: master
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the authorization scope after module execution.
- returned: on success
- type: complex
- contains:
- id:
- description: ID of the authorization scope.
- type: str
- returned: when O(state=present)
- sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
- name:
- description: Name of the authorization scope.
- type: str
- returned: when O(state=present)
- sample: file:delete
- display_name:
- description: Display name of the authorization scope.
- type: str
- returned: when O(state=present)
- sample: File delete
- icon_uri:
- description: Icon URI for the authorization scope.
- type: str
- returned: when O(state=present)
- sample: http://localhost/icon.png
-
-'''
+ description: Representation of the authorization scope after module execution.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization scope.
+ type: str
+ returned: when O(state=present)
+ sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
+ name:
+ description: Name of the authorization scope.
+ type: str
+ returned: when O(state=present)
+ sample: file:delete
+ display_name:
+ description: Display name of the authorization scope.
+ type: str
+ returned: when O(state=present)
+ sample: File delete
+ icon_uri:
+ description: Icon URI for the authorization scope.
+ type: str
+ returned: when O(state=present)
+ sample: http://localhost/icon.png
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -145,8 +142,8 @@ def main():
state=dict(type='str', default='present',
choices=['present', 'absent']),
name=dict(type='str', required=True),
- display_name=dict(type='str', required=False),
- icon_uri=dict(type='str', required=False),
+ display_name=dict(type='str'),
+ icon_uri=dict(type='str'),
client_id=dict(type='str', required=True),
realm=dict(type='str', required=True)
)
@@ -156,8 +153,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py
index 8363c252e2..9607c0172c 100644
--- a/plugins/modules/keycloak_authz_custom_policy.py
+++ b/plugins/modules/keycloak_authz_custom_policy.py
@@ -9,75 +9,73 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authz_custom_policy
-short_description: Allows administration of Keycloak client custom Javascript policies via Keycloak API
+short_description: Allows administration of Keycloak client custom Javascript policies using Keycloak API
version_added: 7.5.0
description:
- - This module allows the administration of Keycloak client custom Javascript via the Keycloak REST
- API. Custom Javascript policies are only available if a client has Authorization enabled and if
- they have been deployed to the Keycloak server as JAR files.
-
- - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
- being used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
- The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
- U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
-
+ - This module allows the administration of Keycloak client custom Javascript policies using the Keycloak REST API. Custom Javascript
+ policies are only available if a client has Authorization enabled and if they have been deployed to the Keycloak server
+ as JAR files.
+ - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+ the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate
+ realm definition with the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services
+ paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the custom policy.
- - On V(present), the custom policy will be created (or updated if it exists already).
- - On V(absent), the custom policy will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the custom policy to create.
- type: str
- required: true
- policy_type:
- description:
- - The type of the policy. This must match the name of the custom policy deployed to the server.
- - Multiple policies pointing to the same policy type can be created, but their names have to differ.
- type: str
- required: true
- client_id:
- description:
- - The V(clientId) of the Keycloak client that should have the custom policy attached to it.
- - This is usually a human-readable name of the Keycloak client.
- type: str
- required: true
- realm:
- description:
- - The name of the Keycloak realm the Keycloak client is in.
- type: str
- required: true
+ state:
+ description:
+ - State of the custom policy.
+ - On V(present), the custom policy is created (or updated if it exists already).
+ - On V(absent), the custom policy is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the custom policy to create.
+ type: str
+ required: true
+ policy_type:
+ description:
+ - The type of the policy. This must match the name of the custom policy deployed to the server.
+ - Multiple policies pointing to the same policy type can be created, but their names have to differ.
+ type: str
+ required: true
+ client_id:
+ description:
+ - The V(clientId) of the Keycloak client that should have the custom policy attached to it.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Samuli Seppänen (@mattock)
-'''
+ - Samuli Seppänen (@mattock)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Manage Keycloak custom authorization policy
community.general.keycloak_authz_custom_policy:
name: OnlyOwner
@@ -89,31 +87,30 @@ EXAMPLES = '''
auth_username: keycloak
auth_password: keycloak
auth_realm: master
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the custom policy after module execution.
- returned: on success
- type: dict
- contains:
- name:
- description: Name of the custom policy.
- type: str
- returned: when I(state=present)
- sample: file:delete
- policy_type:
- description: Type of custom policy.
- type: str
- returned: when I(state=present)
- sample: File delete
-
-'''
+ description: Representation of the custom policy after module execution.
+ returned: on success
+ type: dict
+ contains:
+ name:
+ description: Name of the custom policy.
+ type: str
+ returned: when O(state=present)
+ sample: file:delete
+ policy_type:
+ description: Type of custom policy.
+ type: str
+ returned: when O(state=present)
+ sample: File delete
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -142,8 +139,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', end_state={})
diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py
index ef81fb8c31..74bc6cf956 100644
--- a/plugins/modules/keycloak_authz_permission.py
+++ b/plugins/modules/keycloak_authz_permission.py
@@ -9,125 +9,121 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authz_permission
version_added: 7.2.0
-short_description: Allows administration of Keycloak client authorization permissions via Keycloak API
+short_description: Allows administration of Keycloak client authorization permissions using Keycloak API
description:
- - This module allows the administration of Keycloak client authorization permissions via the Keycloak REST
- API. Authorization permissions are only available if a client has Authorization enabled.
-
- - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular
- POST and PUT operations are targeted at permission endpoints, whereas GET requests go to policies
- endpoint. To make matters more interesting the JSON responses from GET requests return data in a
- different format than what is expected for POST and PUT. The end result is that it is not possible to
- detect changes to things like policies, scopes or resources - at least not without a large number of
- additional API calls. Therefore this module always updates authorization permissions instead of
- attempting to determine if changes are truly needed.
-
- - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
- being used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
- The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
- U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
-
+ - This module allows the administration of Keycloak client authorization permissions using the Keycloak REST API. Authorization
+ permissions are only available if a client has Authorization enabled.
+ - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular POST and PUT operations
+ are targeted at permission endpoints, whereas GET requests go to policies endpoint. To make matters more interesting the
+ JSON responses from GET requests return data in a different format than what is expected for POST and PUT. The end result
+ is that it is not possible to detect changes to things like policies, scopes or resources - at least not without a large
+ number of additional API calls. Therefore this module always updates authorization permissions instead of attempting to
+ determine if changes are truly needed.
+ - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+ the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate
+ realm definition with the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services
+ paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the authorization permission.
- - On V(present), the authorization permission will be created (or updated if it exists already).
- - On V(absent), the authorization permission will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the authorization permission to create.
- type: str
- required: true
+ state:
description:
- description:
- - The description of the authorization permission.
- type: str
- required: false
- permission_type:
- description:
- - The type of authorization permission.
- - On V(scope) create a scope-based permission.
- - On V(resource) create a resource-based permission.
- type: str
- required: true
- choices:
- - resource
- - scope
- decision_strategy:
- description:
- - The decision strategy to use with this permission.
- type: str
- default: UNANIMOUS
- required: false
- choices:
- - UNANIMOUS
- - AFFIRMATIVE
- - CONSENSUS
- resources:
- description:
- - Resource names to attach to this permission.
- - Scope-based permissions can only include one resource.
- - Resource-based permissions can include multiple resources.
- type: list
- elements: str
- default: []
- required: false
- scopes:
- description:
- - Scope names to attach to this permission.
- - Resource-based permissions cannot have scopes attached to them.
- type: list
- elements: str
- default: []
- required: false
- policies:
- description:
- - Policy names to attach to this permission.
- type: list
- elements: str
- default: []
- required: false
- client_id:
- description:
- - The clientId of the keycloak client that should have the authorization scope.
- - This is usually a human-readable name of the Keycloak client.
- type: str
- required: true
- realm:
- description:
- - The name of the Keycloak realm the Keycloak client is in.
- type: str
- required: true
+ - State of the authorization permission.
+ - On V(present), the authorization permission is created (or updated if it exists already).
+ - On V(absent), the authorization permission is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the authorization permission to create.
+ type: str
+ required: true
+ description:
+ description:
+ - The description of the authorization permission.
+ type: str
+ required: false
+ permission_type:
+ description:
+ - The type of authorization permission.
+ - On V(scope) create a scope-based permission.
+ - On V(resource) create a resource-based permission.
+ type: str
+ required: true
+ choices:
+ - resource
+ - scope
+ decision_strategy:
+ description:
+ - The decision strategy to use with this permission.
+ type: str
+ default: UNANIMOUS
+ required: false
+ choices:
+ - UNANIMOUS
+ - AFFIRMATIVE
+ - CONSENSUS
+ resources:
+ description:
+ - Resource names to attach to this permission.
+ - Scope-based permissions can only include one resource.
+ - Resource-based permissions can include multiple resources.
+ type: list
+ elements: str
+ default: []
+ required: false
+ scopes:
+ description:
+ - Scope names to attach to this permission.
+ - Resource-based permissions cannot have scopes attached to them.
+ type: list
+ elements: str
+ default: []
+ required: false
+ policies:
+ description:
+ - Policy names to attach to this permission.
+ type: list
+ elements: str
+ default: []
+ required: false
+ client_id:
+ description:
+ - The clientId of the Keycloak client that should have the authorization scope.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Samuli Seppänen (@mattock)
-'''
+ - Samuli Seppänen (@mattock)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Manage scope-based Keycloak authorization permission
community.general.keycloak_authz_permission:
name: ScopePermission
@@ -161,68 +157,68 @@ EXAMPLES = '''
auth_username: keycloak
auth_password: keycloak
auth_realm: master
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the authorization permission after module execution.
- returned: on success
- type: complex
- contains:
- id:
- description: ID of the authorization permission.
- type: str
- returned: when O(state=present)
- sample: 9da05cd2-b273-4354-bbd8-0c133918a454
- name:
- description: Name of the authorization permission.
- type: str
- returned: when O(state=present)
- sample: ResourcePermission
- description:
- description: Description of the authorization permission.
- type: str
- returned: when O(state=present)
- sample: Resource Permission
- type:
- description: Type of the authorization permission.
- type: str
- returned: when O(state=present)
- sample: resource
- decisionStrategy:
- description: The decision strategy to use.
- type: str
- returned: when O(state=present)
- sample: UNANIMOUS
- logic:
- description: The logic used for the permission (part of the payload, but has a fixed value).
- type: str
- returned: when O(state=present)
- sample: POSITIVE
- resources:
- description: IDs of resources attached to this permission.
- type: list
- returned: when O(state=present)
- sample:
- - 49e052ff-100d-4b79-a9dd-52669ed3c11d
- scopes:
- description: IDs of scopes attached to this permission.
- type: list
- returned: when O(state=present)
- sample:
- - 9da05cd2-b273-4354-bbd8-0c133918a454
- policies:
- description: IDs of policies attached to this permission.
- type: list
- returned: when O(state=present)
- sample:
- - 9da05cd2-b273-4354-bbd8-0c133918a454
-'''
+ description: Representation of the authorization permission after module execution.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: 9da05cd2-b273-4354-bbd8-0c133918a454
+ name:
+ description: Name of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: ResourcePermission
+ description:
+ description: Description of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: Resource Permission
+ type:
+ description: Type of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: resource
+ decisionStrategy:
+ description: The decision strategy to use.
+ type: str
+ returned: when O(state=present)
+ sample: UNANIMOUS
+ logic:
+ description: The logic used for the permission (part of the payload, but has a fixed value).
+ type: str
+ returned: when O(state=present)
+ sample: POSITIVE
+ resources:
+ description: IDs of resources attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 49e052ff-100d-4b79-a9dd-52669ed3c11d
+ scopes:
+ description: IDs of scopes attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 9da05cd2-b273-4354-bbd8-0c133918a454
+ policies:
+ description: IDs of policies attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 9da05cd2-b273-4354-bbd8-0c133918a454
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -241,13 +237,13 @@ def main():
state=dict(type='str', default='present',
choices=['present', 'absent']),
name=dict(type='str', required=True),
- description=dict(type='str', required=False),
+ description=dict(type='str'),
permission_type=dict(type='str', choices=['scope', 'resource'], required=True),
decision_strategy=dict(type='str', default='UNANIMOUS',
choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']),
- resources=dict(type='list', elements='str', default=[], required=False),
- scopes=dict(type='list', elements='str', default=[], required=False),
- policies=dict(type='list', elements='str', default=[], required=False),
+ resources=dict(type='list', elements='str', default=[]),
+ scopes=dict(type='list', elements='str', default=[]),
+ policies=dict(type='list', elements='str', default=[]),
client_id=dict(type='str', required=True),
realm=dict(type='str', required=True)
)
@@ -257,8 +253,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
# Convenience variables
state = module.params.get('state')
diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py
index 8b4e96b416..af7318315f 100644
--- a/plugins/modules/keycloak_authz_permission_info.py
+++ b/plugins/modules/keycloak_authz_permission_info.py
@@ -9,8 +9,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_authz_permission_info
version_added: 7.2.0
@@ -18,47 +17,47 @@ version_added: 7.2.0
short_description: Query Keycloak client authorization permissions information
description:
- - This module allows querying information about Keycloak client authorization permissions from the
- resources endpoint via the Keycloak REST API. Authorization permissions are only available if a
- client has Authorization enabled.
-
- - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
- being used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
- The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
- U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
+ - This module allows querying information about Keycloak client authorization permissions from the resources endpoint using
+ the Keycloak REST API. Authorization permissions are only available if a client has Authorization enabled.
+ - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have
+ the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate
+ realm definition with the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services
+ paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/).
+attributes:
+ action_group:
+ version_added: 10.2.0
options:
- name:
- description:
- - Name of the authorization permission to create.
- type: str
- required: true
- client_id:
- description:
- - The clientId of the keycloak client that should have the authorization scope.
- - This is usually a human-readable name of the Keycloak client.
- type: str
- required: true
- realm:
- description:
- - The name of the Keycloak realm the Keycloak client is in.
- type: str
- required: true
+ name:
+ description:
+ - Name of the authorization permission to create.
+ type: str
+ required: true
+ client_id:
+ description:
+ - The clientId of the Keycloak client that should have the authorization scope.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
author:
- - Samuli Seppänen (@mattock)
-'''
+ - Samuli Seppänen (@mattock)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Query Keycloak authorization permission
community.general.keycloak_authz_permission_info:
name: ScopePermission
@@ -68,48 +67,48 @@ EXAMPLES = '''
auth_username: keycloak
auth_password: keycloak
auth_realm: master
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
queried_state:
- description: State of the resource (a policy) as seen by Keycloak.
- returned: on success
- type: complex
- contains:
- id:
- description: ID of the authorization permission.
- type: str
- sample: 9da05cd2-b273-4354-bbd8-0c133918a454
- name:
- description: Name of the authorization permission.
- type: str
- sample: ResourcePermission
- description:
- description: Description of the authorization permission.
- type: str
- sample: Resource Permission
- type:
- description: Type of the authorization permission.
- type: str
- sample: resource
- decisionStrategy:
- description: The decision strategy.
- type: str
- sample: UNANIMOUS
- logic:
- description: The logic used for the permission (part of the payload, but has a fixed value).
- type: str
- sample: POSITIVE
- config:
- description: Configuration of the permission (empty in all observed cases).
- type: dict
- sample: {}
-'''
+ description: State of the resource (a policy) as seen by Keycloak.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization permission.
+ type: str
+ sample: 9da05cd2-b273-4354-bbd8-0c133918a454
+ name:
+ description: Name of the authorization permission.
+ type: str
+ sample: ResourcePermission
+ description:
+ description: Description of the authorization permission.
+ type: str
+ sample: Resource Permission
+ type:
+ description: Type of the authorization permission.
+ type: str
+ sample: resource
+ decisionStrategy:
+ description: The decision strategy.
+ type: str
+ sample: UNANIMOUS
+ logic:
+ description: The logic used for the permission (part of the payload, but has a fixed value).
+ type: str
+ sample: POSITIVE
+ config:
+ description: Configuration of the permission (empty in all observed cases).
+ type: dict
+ sample: {}
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -135,8 +134,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
# Convenience variables
name = module.params.get('name')
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py
index cb92ebc87a..6c8a7b1383 100644
--- a/plugins/modules/keycloak_client.py
+++ b/plugins/modules/keycloak_client.py
@@ -8,600 +8,550 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_client
-short_description: Allows administration of Keycloak clients via Keycloak API
+short_description: Allows administration of Keycloak clients using Keycloak API
description:
- - This module allows the administration of Keycloak clients via the Keycloak REST API. It
- requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- Aliases are provided so camelCased versions can be used as well.
-
- - The Keycloak API does not always sanity check inputs e.g. you can set
- SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
- If you do not specify a setting, usually a sensible default is chosen.
-
+ - This module allows the administration of Keycloak clients using the Keycloak REST API. It requires access to the REST
+ API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default
+ Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used
+ as well.
+ - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect
+ client for instance and the other way around. Be careful. If you do not specify a setting, usually a sensible default
+ is chosen.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the client
- - On V(present), the client will be created (or updated if it exists already).
- - On V(absent), the client will be removed if it exists
- choices: ['present', 'absent']
- default: 'present'
- type: str
-
- realm:
- description:
- - The realm to create the client in.
- type: str
- default: master
-
- client_id:
- description:
- - Client id of client to be worked on. This is usually an alphanumeric name chosen by
- you. Either this or O(id) is required. If you specify both, O(id) takes precedence.
- This is 'clientId' in the Keycloak REST API.
- aliases:
- - clientId
- type: str
-
- id:
- description:
- - Id of client to be worked on. This is usually an UUID. Either this or O(client_id)
- is required. If you specify both, this takes precedence.
- type: str
-
- name:
- description:
- - Name of the client (this is not the same as O(client_id)).
- type: str
-
+ state:
description:
+ - State of the client.
+ - On V(present), the client is created (or updated if it exists already).
+ - On V(absent), the client is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client ID of client to be worked on. This is usually an alphanumeric name chosen by you. Either this or O(id) is required.
+ If you specify both, O(id) takes precedence. This is C(clientId) in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+ - ID of client to be worked on. This is usually an UUID. Either this or O(client_id) is required. If you specify both,
+ this takes precedence.
+ type: str
+
+ name:
+ description:
+ - Name of the client (this is not the same as O(client_id)).
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak.
+ type: str
+
+ root_url:
+ description:
+ - Root URL appended to relative URLs for this client. This is C(rootUrl) in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+ - URL to the admin interface of the client. This is C(adminUrl) in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+ - Default URL to use when the auth server needs to redirect or link back to the client. This is C(baseUrl) in the Keycloak
+ REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either V(client-secret), V(client-jwt), or V(client-x509) can be
+ chosen. When using V(client-secret), the module parameter O(secret) can set it, for V(client-jwt), you can use the
+ keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter to configure
+ its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) and C(x509.subjectdn)
+ in the O(attributes) module parameter to configure which certificate(s) to accept.
+ - This is C(clientAuthenticatorType) in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt', 'client-x509']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one
+ is generated if it does not exit). If changing this secret, the module does not register a change currently (but the
+ changed secret is saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration service. This is C(registrationAccessToken)
+ in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - List of default roles for this client. If the client roles referenced do not exist yet, they are created. This is
+ C(defaultRoles) in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+ elements: str
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client. This is C(redirectUris) in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+ elements: str
+
+ web_origins:
+ description:
+ - List of allowed CORS origins. This is C(webOrigins) in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+ elements: str
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). This is C(notBefore) in the
+ Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only. This is C(bearerOnly) in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access. This is C(consentRequired) in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect). This is C(standardFlowEnabled) in the Keycloak REST
+ API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect). This is C(implicitFlowEnabled) in the Keycloak REST
+ API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect). This is C(directAccessGrantsEnabled) in
+ the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect). This is C(serviceAccountsEnabled) in the Keycloak
+ REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect). This is C(authorizationServicesEnabled)
+ in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not. This is C(publicClient) in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not. This is C(frontchannelLogout) in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+ - Type of client.
+ - At creation only, default value is V(openid-connect) if O(protocol) is omitted.
+ - The V(docker-v2) value was added in community.general 8.6.0.
+ type: str
+ choices: ['openid-connect', 'saml', 'docker-v2']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not. This is C(fullScopeAllowed) in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client. This is C(nodeReRegistrationTimeout) in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - Dict of registered cluster nodes (with C(nodename) as the key and last registration time as the value). This is C(registeredNodes)
+ in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+ - Client template to use for this client. If it does not exist this field is silently dropped. This is C(clientTemplate)
+ in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the O(client_template). This is C(useTemplateConfig) in the Keycloak REST
+ API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the O(client_template). This is C(useTemplateScope) in the Keycloak
+ REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the O(client_template). This is C(useTemplateMappers) in the Keycloak
+ REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ always_display_in_console:
+ description:
+ - Whether or not to display this client in account console, even if the user does not have an active session.
+ aliases:
+ - alwaysDisplayInConsole
+ type: bool
+ version_added: 4.7.0
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required. This is C(surrogateAuthRequired) in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - A data structure defining the authorization settings for this client. For reference, please see the Keycloak API docs
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). This is C(authorizationSettings)
+ in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ authentication_flow_binding_overrides:
+ description:
+ - Override realm authentication flow bindings.
+ type: dict
+ suboptions:
+ browser:
description:
- - Description of the client in Keycloak.
+ - Flow ID of the browser authentication flow.
+ - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are
+ mutually exclusive.
type: str
- root_url:
+ browser_name:
description:
- - Root URL appended to relative URLs for this client.
- This is 'rootUrl' in the Keycloak REST API.
+ - Flow name of the browser authentication flow.
+ - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are
+ mutually exclusive.
aliases:
- - rootUrl
+ - browserName
+ type: str
+ version_added: 9.1.0
+
+ direct_grant:
+ description:
+ - Flow ID of the direct grant authentication flow.
+ - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name)
+ are mutually exclusive.
+ aliases:
+ - directGrant
type: str
- admin_url:
+ direct_grant_name:
description:
- - URL to the admin interface of the client.
- This is 'adminUrl' in the Keycloak REST API.
+ - Flow name of the direct grant authentication flow.
+ - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name)
+ are mutually exclusive.
aliases:
- - adminUrl
+ - directGrantName
+ type: str
+ version_added: 9.1.0
+ aliases:
+ - authenticationFlowBindingOverrides
+ version_added: 3.4.0
+
+ default_client_scopes:
+ description:
+ - List of default client scopes.
+ aliases:
+ - defaultClientScopes
+ type: list
+ elements: str
+ version_added: 4.7.0
+
+ optional_client_scopes:
+ description:
+ - List of optional client scopes.
+ aliases:
+ - optionalClientScopes
+ type: list
+ elements: str
+ version_added: 4.7.0
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client. This is C(protocolMappers) in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
type: str
- base_url:
+ id:
description:
- - Default URL to use when the auth server needs to redirect or link back to the client
- This is 'baseUrl' in the Keycloak REST API.
- aliases:
- - baseUrl
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
type: str
- enabled:
+ name:
description:
- - Is this client enabled or not?
- type: bool
-
- client_authenticator_type:
- description:
- - How do clients authenticate with the auth server? Either V(client-secret),
- V(client-jwt), or V(client-x509) can be chosen. When using V(client-secret), the module parameter
- O(secret) can set it, for V(client-jwt), you can use the keys C(use.jwks.url),
- C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter
- to configure its behavior. For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison)
- and C(x509.subjectdn) in the O(attributes) module parameter to configure which certificate(s) to accept.
- - This is 'clientAuthenticatorType' in the Keycloak REST API.
- choices: ['client-secret', 'client-jwt', 'client-x509']
- aliases:
- - clientAuthenticatorType
+ - The name of this protocol mapper.
type: str
- secret:
+ protocol:
description:
- - When using O(client_authenticator_type=client-secret) (the default), you can
- specify a secret here (otherwise one will be generated if it does not exit). If
- changing this secret, the module will not register a change currently (but the
- changed secret will be saved).
- type: str
-
- registration_access_token:
- description:
- - The registration access token provides access for clients to the client registration
- service.
- This is 'registrationAccessToken' in the Keycloak REST API.
- aliases:
- - registrationAccessToken
- type: str
-
- default_roles:
- description:
- - list of default roles for this client. If the client roles referenced do not exist
- yet, they will be created.
- This is 'defaultRoles' in the Keycloak REST API.
- aliases:
- - defaultRoles
- type: list
- elements: str
-
- redirect_uris:
- description:
- - Acceptable redirect URIs for this client.
- This is 'redirectUris' in the Keycloak REST API.
- aliases:
- - redirectUris
- type: list
- elements: str
-
- web_origins:
- description:
- - List of allowed CORS origins.
- This is 'webOrigins' in the Keycloak REST API.
- aliases:
- - webOrigins
- type: list
- elements: str
-
- not_before:
- description:
- - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
- This is 'notBefore' in the Keycloak REST API.
- type: int
- aliases:
- - notBefore
-
- bearer_only:
- description:
- - The access type of this client is bearer-only.
- This is 'bearerOnly' in the Keycloak REST API.
- aliases:
- - bearerOnly
- type: bool
-
- consent_required:
- description:
- - If enabled, users have to consent to client access.
- This is 'consentRequired' in the Keycloak REST API.
- aliases:
- - consentRequired
- type: bool
-
- standard_flow_enabled:
- description:
- - Enable standard flow for this client or not (OpenID connect).
- This is 'standardFlowEnabled' in the Keycloak REST API.
- aliases:
- - standardFlowEnabled
- type: bool
-
- implicit_flow_enabled:
- description:
- - Enable implicit flow for this client or not (OpenID connect).
- This is 'implicitFlowEnabled' in the Keycloak REST API.
- aliases:
- - implicitFlowEnabled
- type: bool
-
- direct_access_grants_enabled:
- description:
- - Are direct access grants enabled for this client or not (OpenID connect).
- This is 'directAccessGrantsEnabled' in the Keycloak REST API.
- aliases:
- - directAccessGrantsEnabled
- type: bool
-
- service_accounts_enabled:
- description:
- - Are service accounts enabled for this client or not (OpenID connect).
- This is 'serviceAccountsEnabled' in the Keycloak REST API.
- aliases:
- - serviceAccountsEnabled
- type: bool
-
- authorization_services_enabled:
- description:
- - Are authorization services enabled for this client or not (OpenID connect).
- This is 'authorizationServicesEnabled' in the Keycloak REST API.
- aliases:
- - authorizationServicesEnabled
- type: bool
-
- public_client:
- description:
- - Is the access type for this client public or not.
- This is 'publicClient' in the Keycloak REST API.
- aliases:
- - publicClient
- type: bool
-
- frontchannel_logout:
- description:
- - Is frontchannel logout enabled for this client or not.
- This is 'frontchannelLogout' in the Keycloak REST API.
- aliases:
- - frontchannelLogout
- type: bool
-
- protocol:
- description:
- - Type of client.
- - At creation only, default value will be V(openid-connect) if O(protocol) is omitted.
- - The V(docker-v2) value was added in community.general 8.6.0.
- type: str
+ - This specifies for which protocol this protocol mapper is active.
choices: ['openid-connect', 'saml', 'docker-v2']
-
- full_scope_allowed:
- description:
- - Is the "Full Scope Allowed" feature set for this client or not.
- This is 'fullScopeAllowed' in the Keycloak REST API.
- aliases:
- - fullScopeAllowed
- type: bool
-
- node_re_registration_timeout:
- description:
- - Cluster node re-registration timeout for this client.
- This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
- type: int
- aliases:
- - nodeReRegistrationTimeout
-
- registered_nodes:
- description:
- - dict of registered cluster nodes (with C(nodename) as the key and last registration
- time as the value).
- This is 'registeredNodes' in the Keycloak REST API.
- type: dict
- aliases:
- - registeredNodes
-
- client_template:
- description:
- - Client template to use for this client. If it does not exist this field will silently
- be dropped.
- This is 'clientTemplate' in the Keycloak REST API.
type: str
- aliases:
- - clientTemplate
- use_template_config:
+ protocolMapper:
description:
- - Whether or not to use configuration from the O(client_template).
- This is 'useTemplateConfig' in the Keycloak REST API.
- aliases:
- - useTemplateConfig
- type: bool
+ - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide
+ since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:'
+ - V(docker-v2-allow-all-mapper).
+ - V(oidc-address-mapper).
+ - V(oidc-full-name-mapper).
+ - V(oidc-group-membership-mapper).
+ - V(oidc-hardcoded-claim-mapper).
+ - V(oidc-hardcoded-role-mapper).
+ - V(oidc-role-name-mapper).
+ - V(oidc-script-based-protocol-mapper).
+ - V(oidc-sha256-pairwise-sub-mapper).
+ - V(oidc-usermodel-attribute-mapper).
+ - V(oidc-usermodel-client-role-mapper).
+ - V(oidc-usermodel-property-mapper).
+ - V(oidc-usermodel-realm-role-mapper).
+ - V(oidc-usersessionmodel-note-mapper).
+ - V(saml-group-membership-mapper).
+ - V(saml-hardcode-attribute-mapper).
+ - V(saml-hardcode-role-mapper).
+ - V(saml-role-list-mapper).
+ - V(saml-role-name-mapper).
+ - V(saml-user-attribute-mapper).
+ - V(saml-user-property-mapper).
+ - V(saml-user-session-note-mapper).
+ - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to
+ Server Info -> Providers and looking under 'protocol-mapper'.
+ type: str
- use_template_scope:
+ config:
description:
- - Whether or not to use scope configuration from the O(client_template).
- This is 'useTemplateScope' in the Keycloak REST API.
- aliases:
- - useTemplateScope
- type: bool
-
- use_template_mappers:
- description:
- - Whether or not to use mapper configuration from the O(client_template).
- This is 'useTemplateMappers' in the Keycloak REST API.
- aliases:
- - useTemplateMappers
- type: bool
-
- always_display_in_console:
- description:
- - Whether or not to display this client in account console, even if the
- user does not have an active session.
- aliases:
- - alwaysDisplayInConsole
- type: bool
- version_added: 4.7.0
-
- surrogate_auth_required:
- description:
- - Whether or not surrogate auth is required.
- This is 'surrogateAuthRequired' in the Keycloak REST API.
- aliases:
- - surrogateAuthRequired
- type: bool
-
- authorization_settings:
- description:
- - a data structure defining the authorization settings for this client. For reference,
- please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
- This is 'authorizationSettings' in the Keycloak REST API.
+ - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value
+ of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its
+ parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the RV(existing) field.
type: dict
- aliases:
- - authorizationSettings
- authentication_flow_binding_overrides:
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration settings; an example is given
+ in the examples section. While an exhaustive list of permissible options is not available; possible options as of
+ Keycloak 3.4 are listed below. The Keycloak API does not validate whether a given option is appropriate for the protocol
+ used; if specified anyway, Keycloak does not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
description:
- - Override realm authentication flow bindings.
- type: dict
- suboptions:
- browser:
- description:
- - Flow ID of the browser authentication flow.
- - O(authentication_flow_binding_overrides.browser)
- and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive.
- type: str
-
- browser_name:
- description:
- - Flow name of the browser authentication flow.
- - O(authentication_flow_binding_overrides.browser)
- and O(authentication_flow_binding_overrides.browser_name) are mutually exclusive.
- aliases:
- - browserName
- type: str
- version_added: 9.1.0
-
- direct_grant:
- description:
- - Flow ID of the direct grant authentication flow.
- - O(authentication_flow_binding_overrides.direct_grant)
- and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive.
- aliases:
- - directGrant
- type: str
-
- direct_grant_name:
- description:
- - Flow name of the direct grant authentication flow.
- - O(authentication_flow_binding_overrides.direct_grant)
- and O(authentication_flow_binding_overrides.direct_grant_name) are mutually exclusive.
- aliases:
- - directGrantName
- type: str
- version_added: 9.1.0
- aliases:
- - authenticationFlowBindingOverrides
- version_added: 3.4.0
-
- default_client_scopes:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp should be included
+ in the login response.
+ saml.client.signature:
description:
- - List of default client scopes.
- aliases:
- - defaultClientScopes
- type: list
- elements: str
- version_added: 4.7.0
-
- optional_client_scopes:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+ saml.encrypt:
description:
- - List of optional client scopes.
- aliases:
- - optionalClientScopes
- type: list
- elements: str
- version_added: 4.7.0
-
- protocol_mappers:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+ saml.force.post.binding:
description:
- - a list of dicts defining protocol mappers for this client.
- This is 'protocolMappers' in the Keycloak REST API.
- aliases:
- - protocolMappers
- type: list
- elements: dict
- suboptions:
- consentRequired:
- description:
- - Specifies whether a user needs to provide consent to a client for this mapper to be active.
- type: bool
-
- consentText:
- description:
- - The human-readable name of the consent the user is presented to accept.
- type: str
-
- id:
- description:
- - Usually a UUID specifying the internal ID of this protocol mapper instance.
- type: str
-
- name:
- description:
- - The name of this protocol mapper.
- type: str
-
- protocol:
- description:
- - This specifies for which protocol this protocol mapper is active.
- choices: ['openid-connect', 'saml', 'docker-v2']
- type: str
-
- protocolMapper:
- description:
- - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
- impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least:"
- - V(docker-v2-allow-all-mapper)
- - V(oidc-address-mapper)
- - V(oidc-full-name-mapper)
- - V(oidc-group-membership-mapper)
- - V(oidc-hardcoded-claim-mapper)
- - V(oidc-hardcoded-role-mapper)
- - V(oidc-role-name-mapper)
- - V(oidc-script-based-protocol-mapper)
- - V(oidc-sha256-pairwise-sub-mapper)
- - V(oidc-usermodel-attribute-mapper)
- - V(oidc-usermodel-client-role-mapper)
- - V(oidc-usermodel-property-mapper)
- - V(oidc-usermodel-realm-role-mapper)
- - V(oidc-usersessionmodel-note-mapper)
- - V(saml-group-membership-mapper)
- - V(saml-hardcode-attribute-mapper)
- - V(saml-hardcode-role-mapper)
- - V(saml-role-list-mapper)
- - V(saml-role-name-mapper)
- - V(saml-user-attribute-mapper)
- - V(saml-user-property-mapper)
- - V(saml-user-session-note-mapper)
- - An exhaustive list of available mappers on your installation can be obtained on
- the admin console by going to Server Info -> Providers and looking under
- 'protocol-mapper'.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
- other than by the source of the mappers and its parent class(es). An example is given
- below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the RV(existing) field.
- type: dict
-
- attributes:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+ saml.onetimeuse.condition:
description:
- - A dict of further attributes for this client. This can contain various configuration
- settings; an example is given in the examples section. While an exhaustive list of
- permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak
- API does not validate whether a given option is appropriate for the protocol used; if specified
- anyway, Keycloak will simply not use it.
- type: dict
- suboptions:
- saml.authnstatement:
- description:
- - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
- should be included in the login response.
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key ID in the SAML Extensions element.
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1).
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+ saml_force_name_id_format:
+ description:
+ - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and use the configured
+ one instead.
+ saml_name_id_format:
+ description:
+ - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent)).
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely V(http://www.w3.org/2001/10/xml-exc-c14n#)
+ for EXCLUSIVE, V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315)
+ for INCLUSIVE, and V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+ saml_single_logout_service_url_post:
+ description:
+ - SAML POST binding URL for the client's single logout service.
+ saml_single_logout_service_url_redirect:
+ description:
+ - SAML redirect binding URL for the client's single logout service.
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned).
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending OIDC request object. One
+ of V(any), V(none), V(RS256).
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client public keys.
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded.
+ x509.subjectdn:
+ description:
+ - For OpenID-Connect clients, subject which is used to authenticate the client.
+ type: str
+ version_added: 9.5.0
- saml.client.signature:
- description:
- - For SAML clients, boolean specifying whether a client signature is required and validated.
-
- saml.encrypt:
- description:
- - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
-
- saml.force.post.binding:
- description:
- - For SAML clients, boolean specifying whether always to use POST binding for responses.
-
- saml.onetimeuse.condition:
- description:
- - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
-
- saml.server.signature:
- description:
- - Boolean specifying whether SAML documents should be signed by the realm.
-
- saml.server.signature.keyinfo.ext:
- description:
- - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
- of the signing key id in the SAML Extensions element.
-
- saml.signature.algorithm:
- description:
- - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1).
-
- saml.signing.certificate:
- description:
- - SAML signing key certificate, base64-encoded.
-
- saml.signing.private.key:
- description:
- - SAML signing key private key, base64-encoded.
-
- saml_assertion_consumer_url_post:
- description:
- - SAML POST Binding URL for the client's assertion consumer service (login responses).
-
- saml_assertion_consumer_url_redirect:
- description:
- - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
-
- saml_force_name_id_format:
- description:
- - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead.
-
- saml_name_id_format:
- description:
- - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent))
-
- saml_signature_canonicalization_method:
- description:
- - SAML signature canonicalization method. This is one of four values, namely
- V(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
- V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
- V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
- V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
-
- saml_single_logout_service_url_post:
- description:
- - SAML POST binding url for the client's single logout service.
-
- saml_single_logout_service_url_redirect:
- description:
- - SAML redirect binding url for the client's single logout service.
-
- user.info.response.signature.alg:
- description:
- - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned).
-
- request.object.signature.alg:
- description:
- - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
- OIDC request object. One of V(any), V(none), V(RS256).
-
- use.jwks.url:
- description:
- - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
- public keys.
-
- jwks.url:
- description:
- - For OpenID-Connect clients, URL where client keys in JWK are stored.
-
- jwt.credential.certificate:
- description:
- - For OpenID-Connect clients, client certificate for validating JWT issued by
- client and signed by its key, base64-encoded.
-
- x509.subjectdn:
- description:
- - For OpenID-Connect clients, subject which will be used to authenticate the client.
- type: str
- version_added: 9.5.0
-
- x509.allow.regex.pattern.comparison:
- description:
- - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) as regular expression.
- type: bool
- version_added: 9.5.0
+ x509.allow.regex.pattern.comparison:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) as regular expression.
+ type: bool
+ version_added: 9.5.0
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Eike Frost (@eikef)
-'''
+ - Eike Frost (@eikef)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create or update Keycloak client (minimal example), authentication with credentials
community.general.keycloak_client:
auth_keycloak_url: https://auth.example.com/auth
@@ -702,7 +652,7 @@ EXAMPLES = '''
- test01
- test02
authentication_flow_binding_overrides:
- browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb
+ browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb
protocol_mappers:
- config:
access.token.claim: true
@@ -741,45 +691,45 @@ EXAMPLES = '''
jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client testclient has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
proposed:
- description: Representation of proposed client.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
+ description: Representation of proposed client.
+ returned: always
+ type: dict
+ sample: {"clientId": "test"}
existing:
- description: Representation of existing client (sample is truncated).
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of existing client (sample is truncated).
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description: Representation of client after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of client after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -1003,8 +953,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['client_id', 'id'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py
index be419904a7..1700c99cc1 100644
--- a/plugins/modules/keycloak_client_rolemapping.py
+++ b/plugins/modules/keycloak_client_rolemapping.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_client_rolemapping
short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API
@@ -17,126 +16,117 @@ short_description: Allows administration of Keycloak client_rolemapping with the
version_added: 3.5.0
description:
- - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup
- to the API to translate the name into the role ID.
-
+ - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
+ - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
+ to translate the name into the role ID.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the client_rolemapping.
- - On V(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the client_rolemapping will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the client_rolemapping.
+ - On V(present), the client_rolemapping is created if it does not yet exist, or updated with the parameters
+ you provide.
+ - On V(absent), the client_rolemapping is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- realm:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ group_name:
+ type: str
+ description:
+ - Name of the group to be mapped.
+ - This parameter is required (can be replaced by gid for fewer API calls).
+ parents:
+ version_added: "7.1.0"
+ type: list
+ description:
+ - List of parent groups for the group to handle sorted top to bottom.
+ - Set this if your group is a subgroup and you do not provide the GID in O(gid).
+ elements: dict
+ suboptions:
+ id:
type: str
description:
- - They Keycloak realm under which this role_representation resides.
- default: 'master'
-
- group_name:
+ - Identify parent by ID.
+ - Needs fewer API calls than using O(parents[].name).
+ - A deep parent chain can be started at any point when first given parent is given as ID.
+ - Note that in principle both ID and name can be specified at the same time, but the current implementation
+ always uses just one of them, with ID being preferred.
+ name:
type: str
description:
- - Name of the group to be mapped.
- - This parameter is required (can be replaced by gid for less API call).
-
- parents:
- version_added: "7.1.0"
- type: list
- description:
- - List of parent groups for the group to handle sorted top to bottom.
- - >-
- Set this if your group is a subgroup and you do not provide the GID in O(gid).
- elements: dict
- suboptions:
- id:
- type: str
- description:
- - Identify parent by ID.
- - Needs less API calls than using O(parents[].name).
- - A deep parent chain can be started at any point when first given parent is given as ID.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
- name:
- type: str
- description:
- - Identify parent by name.
- - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
- - When giving a parent chain with only names it must be complete up to the top.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
- gid:
+ - Identify parent by name.
+ - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
+ - When giving a parent chain with only names it must be complete up to the top.
+ - Note that in principle both ID and name can be specified at the same time, but the current implementation
+ always uses just one of them, with ID being preferred.
+ gid:
+ type: str
+ description:
+ - ID of the group to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ client_id:
+ type: str
+ description:
+ - Name of the client to be mapped (different than O(cid)).
+ - This parameter is required (can be replaced by cid for fewer API calls).
+ cid:
+ type: str
+ description:
+ - ID of the client to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ roles:
+ description:
+ - Roles to be mapped to the group.
+ type: list
+ elements: dict
+ suboptions:
+ name:
type: str
description:
- - Id of the group to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- client_id:
+ - Name of the role_representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
type: str
description:
- - Name of the client to be mapped (different than O(cid)).
- - This parameter is required (can be replaced by cid for less API call).
-
- cid:
- type: str
- description:
- - Id of the client to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- roles:
- description:
- - Roles to be mapped to the group.
- type: list
- elements: dict
- suboptions:
- name:
- type: str
- description:
- - Name of the role_representation.
- - This parameter is required only when creating or updating the role_representation.
- id:
- type: str
- description:
- - The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but
- providing it will reduce the number of API calls required.
-
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Gaëtan Daubresse (@Gaetan2907)
-'''
+ - Gaëtan Daubresse (@Gaetan2907)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Map a client role to a group, authentication with credentials
community.general.keycloak_client_rolemapping:
realm: MyCustomRealm
@@ -206,50 +196,49 @@ EXAMPLES = '''
- name: role_name2
id: role_id2
delegate_to: localhost
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Role role1 assigned to group group1."
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to group group1."
proposed:
- description: Representation of proposed client role mapping.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
+ description: Representation of proposed client role mapping.
+ returned: always
+ type: dict
+ sample: {"clientId": "test"}
existing:
- description:
- - Representation of existing client role mapping.
- - The sample is truncated.
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of existing client role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description:
- - Representation of client role mapping after module execution.
- - The sample is truncated.
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of client role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
@@ -291,8 +280,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py
index cca72f0ddd..fcf57c2e4a 100644
--- a/plugins/modules/keycloak_client_rolescope.py
+++ b/plugins/modules/keycloak_client_rolescope.py
@@ -8,81 +8,77 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_client_rolescope
-short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other specific client applications.
+short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to other
+ specific client applications
version_added: 8.6.0
description:
- - This module allows you to add or remove Keycloak roles from clients scope via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
+ - This module allows you to add or remove Keycloak roles from clients scope using the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the role mapping.
- - On V(present), all roles in O(role_names) will be mapped if not exists yet.
- - On V(absent), all roles mapping in O(role_names) will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
-
- realm:
- type: str
- description:
- - The Keycloak realm under which clients resides.
- default: 'master'
-
- client_id:
- type: str
- required: true
- description:
- - Roles provided in O(role_names) while be added to this client scope.
-
- client_scope_id:
- type: str
- description:
- - If the O(role_names) are client role, the client ID under which it resides.
- - If this parameter is absent, the roles are considered a realm role.
- role_names:
- required: true
- type: list
- elements: str
- description:
- - Names of roles to manipulate.
- - If O(client_scope_id) is present, all roles must be under this client.
- - If O(client_scope_id) is absent, all roles must be under the realm.
+ state:
+ description:
+ - State of the role mapping.
+ - On V(present), all roles in O(role_names) are mapped if they do not exist yet.
+ - On V(absent), all role mappings in O(role_names) are removed if they exist.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which clients reside.
+ default: 'master'
+ client_id:
+ type: str
+ required: true
+ description:
+ - Roles provided in O(role_names) will be added to this client scope.
+ client_scope_id:
+ type: str
+ description:
+ - If the O(role_names) are client role, the client ID under which it resides.
+ - If this parameter is absent, the roles are considered a realm role.
+ role_names:
+ required: true
+ type: list
+ elements: str
+ description:
+ - Names of roles to manipulate.
+ - If O(client_scope_id) is present, all roles must be under this client.
+ - If O(client_scope_id) is absent, all roles must be under the realm.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Andre Desrosiers (@desand01)
-'''
+ - Andre Desrosiers (@desand01)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add roles to public client scope
community.general.keycloak_client_rolescope:
auth_keycloak_url: https://auth.example.com/auth
@@ -93,8 +89,8 @@ EXAMPLES = '''
client_id: frontend-client-public
client_scope_id: backend-client-private
role_names:
- - backend-role-admin
- - backend-role-user
+ - backend-role-admin
+ - backend-role-user
- name: Remove roles from public client scope
community.general.keycloak_client_rolescope:
@@ -106,7 +102,7 @@ EXAMPLES = '''
client_id: frontend-client-public
client_scope_id: backend-client-private
role_names:
- - backend-role-admin
+ - backend-role-admin
state: absent
- name: Add realm roles to public client scope
@@ -118,39 +114,40 @@ EXAMPLES = '''
realm: MyCustomRealm
client_id: frontend-client-public
role_names:
- - realm-role-admin
- - realm-role-user
-'''
+ - realm-role-admin
+ - realm-role-user
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client role scope for frontend-client-public has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client role scope for frontend-client-public has been updated"
end_state:
- description: Representation of role role scope after module execution.
- returned: on success
- type: list
- elements: dict
- sample: [
- {
- "clientRole": false,
- "composite": false,
- "containerId": "MyCustomRealm",
- "id": "47293104-59a6-46f0-b460-2e9e3c9c424c",
- "name": "backend-role-admin"
- },
- {
- "clientRole": false,
- "composite": false,
- "containerId": "MyCustomRealm",
- "id": "39c62a6d-542c-4715-92d2-41021eb33967",
- "name": "backend-role-user"
- }
+ description: Representation of the client role scope after module execution.
+ returned: on success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "clientRole": false,
+ "composite": false,
+ "containerId": "MyCustomRealm",
+ "id": "47293104-59a6-46f0-b460-2e9e3c9c424c",
+ "name": "backend-role-admin"
+ },
+ {
+ "clientRole": false,
+ "composite": false,
+ "containerId": "MyCustomRealm",
+ "id": "39c62a6d-542c-4715-92d2-41021eb33967",
+ "name": "backend-role-user"
+ }
]
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py
index 35ac3d9500..ddb4e1b04b 100644
--- a/plugins/modules/keycloak_clientscope.py
+++ b/plugins/modules/keycloak_clientscope.py
@@ -8,163 +8,153 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_clientscope
-short_description: Allows administration of Keycloak client_scopes via Keycloak API
+short_description: Allows administration of Keycloak client_scopes using Keycloak API
version_added: 3.4.0
description:
- - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup
- to the API to translate the name into the client_scope ID.
-
+ - This module allows you to add, remove or modify Keycloak client_scopes using the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
+ - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API
+ to translate the name into the client_scope ID.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the client_scope.
- - On V(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the client_scope will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
-
- name:
- type: str
- description:
- - Name of the client_scope.
- - This parameter is required only when creating or updating the client_scope.
-
- realm:
- type: str
- description:
- - They Keycloak realm under which this client_scope resides.
- default: 'master'
-
- id:
- type: str
- description:
- - The unique identifier for this client_scope.
- - This parameter is not required for updating or deleting a client_scope but
- providing it will reduce the number of API calls required.
-
+ state:
description:
- type: str
- description:
- - Description for this client_scope.
- - This parameter is not required for updating or deleting a client_scope.
+ - State of the client_scope.
+ - On V(present), the client_scope is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the client_scope is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- protocol:
+ name:
+ type: str
+ description:
+ - Name of the client_scope.
+ - This parameter is required only when creating or updating the client_scope.
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this client_scope resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this client_scope.
+ - This parameter is not required for updating or deleting a client_scope but providing it reduces the number of API
+ calls required.
+ description:
+ type: str
+ description:
+ - Description for this client_scope.
+ - This parameter is not required for updating or deleting a client_scope.
+ protocol:
+ description:
+ - Type of client.
+ - The V(docker-v2) value was added in community.general 8.6.0.
+ choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2']
+ type: str
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client.
+ - This is C(protocolMappers) in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ protocol:
description:
- - Type of client.
- - The V(docker-v2) value was added in community.general 8.6.0.
+ - This specifies for which protocol this protocol mapper
+ is active.
choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2']
type: str
- protocol_mappers:
+ protocolMapper:
description:
- - A list of dicts defining protocol mappers for this client.
- - This is 'protocolMappers' in the Keycloak REST API.
- aliases:
- - protocolMappers
- type: list
- elements: dict
- suboptions:
- protocol:
- description:
- - This specifies for which protocol this protocol mapper.
- - is active.
- choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2']
- type: str
+ - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide
+ since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:'
+ - V(docker-v2-allow-all-mapper).
+ - V(oidc-address-mapper).
+ - V(oidc-full-name-mapper).
+ - V(oidc-group-membership-mapper).
+ - V(oidc-hardcoded-claim-mapper).
+ - V(oidc-hardcoded-role-mapper).
+ - V(oidc-role-name-mapper).
+ - V(oidc-script-based-protocol-mapper).
+ - V(oidc-sha256-pairwise-sub-mapper).
+ - V(oidc-usermodel-attribute-mapper).
+ - V(oidc-usermodel-client-role-mapper).
+ - V(oidc-usermodel-property-mapper).
+ - V(oidc-usermodel-realm-role-mapper).
+ - V(oidc-usersessionmodel-note-mapper).
+ - V(saml-group-membership-mapper).
+ - V(saml-hardcode-attribute-mapper).
+ - V(saml-hardcode-role-mapper).
+ - V(saml-role-list-mapper).
+ - V(saml-role-name-mapper).
+ - V(saml-user-attribute-mapper).
+ - V(saml-user-property-mapper).
+ - V(saml-user-session-note-mapper).
+ - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to
+ Server Info -> Providers and looking under 'protocol-mapper'.
+ type: str
- protocolMapper:
- description:
- - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
- impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least:"
- - V(docker-v2-allow-all-mapper)
- - V(oidc-address-mapper)
- - V(oidc-full-name-mapper)
- - V(oidc-group-membership-mapper)
- - V(oidc-hardcoded-claim-mapper)
- - V(oidc-hardcoded-role-mapper)
- - V(oidc-role-name-mapper)
- - V(oidc-script-based-protocol-mapper)
- - V(oidc-sha256-pairwise-sub-mapper)
- - V(oidc-usermodel-attribute-mapper)
- - V(oidc-usermodel-client-role-mapper)
- - V(oidc-usermodel-property-mapper)
- - V(oidc-usermodel-realm-role-mapper)
- - V(oidc-usersessionmodel-note-mapper)
- - V(saml-group-membership-mapper)
- - V(saml-hardcode-attribute-mapper)
- - V(saml-hardcode-role-mapper)
- - V(saml-role-list-mapper)
- - V(saml-role-name-mapper)
- - V(saml-user-attribute-mapper)
- - V(saml-user-property-mapper)
- - V(saml-user-session-note-mapper)
- - An exhaustive list of available mappers on your installation can be obtained on
- the admin console by going to Server Info -> Providers and looking under
- 'protocol-mapper'.
- type: str
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
- name:
- description:
- - The name of this protocol mapper.
- type: str
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
- id:
- description:
- - Usually a UUID specifying the internal ID of this protocol mapper instance.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
- other than by the source of the mappers and its parent class(es). An example is given
- below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the RV(existing) return value.
- type: dict
-
- attributes:
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value
+ of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its
+ parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the RV(existing) return value.
type: dict
- description:
- - A dict of key/value pairs to set as custom attributes for the client_scope.
- - Values may be single values (for example a string) or a list of strings.
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the client_scope.
+ - Values may be single values (for example a string) or a list of strings.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Gaëtan Daubresse (@Gaetan2907)
-'''
+ - Gaëtan Daubresse (@Gaetan2907)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a Keycloak client_scopes, authentication with credentials
community.general.keycloak_clientscope:
name: my-new-kc-clientscope
@@ -251,54 +241,54 @@ EXAMPLES = '''
protocol: saml
protocolMapper: saml-role-list-mapper
attributes:
- attrib1: value1
- attrib2: value2
- attrib3:
- - with
- - numerous
- - individual
- - list
- - items
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client_scope testclientscope has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client_scope testclientscope has been updated"
proposed:
- description: Representation of proposed client scope.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
+ description: Representation of proposed client scope.
+ returned: always
+ type: dict
+ sample: {"clientId": "test"}
existing:
- description: Representation of existing client scope (sample is truncated).
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of existing client scope (sample is truncated).
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description: Representation of client scope after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of client scope after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError, is_struct_included
@@ -376,8 +366,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py
index 055bb053c5..85308f1a22 100644
--- a/plugins/modules/keycloak_clientscope_type.py
+++ b/plugins/modules/keycloak_clientscope_type.py
@@ -9,27 +9,25 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_clientscope_type
-short_description: Set the type of aclientscope in realm or client via Keycloak API
+short_description: Set the type of a clientscope in realm or client using Keycloak API
version_added: 6.6.0
description:
- - This module allows you to set the type (optional, default) of clientscopes
- via the Keycloak REST API. It requires access to the REST API via OpenID
- Connect; the user connecting and the client being used must have the
- requisite access rights. In a default Keycloak installation, admin-cli and
- an admin user would work, as would a separate client definition with the
- scope tailored to your needs and a user having the expected roles.
-
+ - This module allows you to set the type (optional, default) of clientscopes using the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
realm:
@@ -59,13 +57,14 @@ options:
extends_documentation_fragment:
- community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
- community.general.attributes
author:
- Simon Pahl (@simonpahl)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Set default client scopes on realm level
community.general.keycloak_clientscope_type:
auth_client_id: admin-cli
@@ -88,42 +87,56 @@ EXAMPLES = '''
default_clientscopes: ['profile', 'roles']
optional_clientscopes: ['phone']
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: ""
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: ""
proposed:
- description: Representation of proposed client-scope types mapping.
- returned: always
- type: dict
- sample: {
- default_clientscopes: ["profile", "role"],
- optional_clientscopes: []
+ description: Representation of proposed client-scope types mapping.
+ returned: always
+ type: dict
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": []
}
existing:
- description:
- - Representation of client scopes before module execution.
- returned: always
- type: dict
- sample: {
- default_clientscopes: ["profile", "role"],
- optional_clientscopes: ["phone"]
+ description:
+ - Representation of client scopes before module execution.
+ returned: always
+ type: dict
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": [
+ "phone"
+ ]
}
end_state:
- description:
- - Representation of client scopes after module execution.
- - The sample is truncated.
- returned: on success
- type: dict
- sample: {
- default_clientscopes: ["profile", "role"],
- optional_clientscopes: []
+ description:
+ - Representation of client scopes after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": []
}
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -155,15 +168,17 @@ def keycloak_clientscope_type_module():
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([
- ['token', 'auth_realm', 'auth_username', 'auth_password'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'],
['default_clientscopes', 'optional_clientscopes']
]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
mutually_exclusive=[
['token', 'auth_realm'],
['token', 'auth_username'],
['token', 'auth_password']
- ])
+ ],
+ )
return module
diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py
index c772620351..0ea48f6a33 100644
--- a/plugins/modules/keycloak_clientsecret_info.py
+++ b/plugins/modules/keycloak_clientsecret_info.py
@@ -9,28 +9,25 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_clientsecret_info
-short_description: Retrieve client secret via Keycloak API
+short_description: Retrieve client secret using Keycloak API
version_added: 6.1.0
description:
- - This module allows you to get a Keycloak client secret via the Keycloak
- REST API. It requires access to the REST API via OpenID Connect; the user
- connecting and the client being used must have the requisite access rights.
- In a default Keycloak installation, admin-cli and an admin user would work,
- as would a separate client definition with the scope tailored to your needs
- and a user having the expected roles.
-
- - When retrieving a new client secret, where possible provide the client's
- O(id) (not O(client_id)) to the module. This removes a lookup to the API to
- translate the O(client_id) into the client ID.
-
- - "Note that this module returns the client secret. To avoid this showing up in the logs,
- please add C(no_log: true) to the task."
+ - This module allows you to get a Keycloak client secret using the Keycloak REST API. It requires access to the REST API
+ using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default
+ Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+ - When retrieving a new client secret, where possible provide the client's O(id) (not O(client_id)) to the module. This
+ removes a lookup to the API to translate the O(client_id) into the client ID.
+ - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to
+ the task.'
+attributes:
+ action_group:
+ version_added: 10.2.0
options:
realm:
@@ -42,14 +39,13 @@ options:
id:
description:
- The unique identifier for this client.
- - This parameter is not required for getting or generating a client secret but
- providing it will reduce the number of API calls required.
+ - This parameter is not required for getting or generating a client secret but providing it reduces the number of API
+ calls required.
type: str
client_id:
description:
- - The O(client_id) of the client. Passing this instead of O(id) results in an
- extra API call.
+ - The O(client_id) of the client. Passing this instead of O(id) results in an extra API call.
aliases:
- clientId
type: str
@@ -57,15 +53,16 @@ options:
extends_documentation_fragment:
- community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
- community.general.attributes
- community.general.attributes.info_module
author:
- Fynn Chen (@fynncfchen)
- John Cant (@johncant)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get a Keycloak client secret, authentication with credentials
community.general.keycloak_clientsecret_info:
id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
@@ -97,16 +94,16 @@ EXAMPLES = '''
token: TOKEN
delegate_to: localhost
no_log: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Textual description of whether we succeeded or failed
+ description: Textual description of whether we succeeded or failed.
returned: always
type: str
clientsecret_info:
- description: Representation of the client secret
+ description: Representation of the client secret.
returned: on success
type: complex
contains:
@@ -120,7 +117,7 @@ clientsecret_info:
type: str
returned: always
sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
KeycloakAPI, KeycloakError, get_token)
diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py
index 7e8b295433..2bcaeb3705 100644
--- a/plugins/modules/keycloak_clientsecret_regenerate.py
+++ b/plugins/modules/keycloak_clientsecret_regenerate.py
@@ -9,34 +9,29 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_clientsecret_regenerate
-short_description: Regenerate Keycloak client secret via Keycloak API
+short_description: Regenerate Keycloak client secret using Keycloak API
version_added: 6.1.0
description:
- - This module allows you to regenerate a Keycloak client secret via the
- Keycloak REST API. It requires access to the REST API via OpenID Connect;
- the user connecting and the client being used must have the requisite access
- rights. In a default Keycloak installation, admin-cli and an admin user
- would work, as would a separate client definition with the scope tailored to
- your needs and a user having the expected roles.
-
- - When regenerating a client secret, where possible provide the client's id
- (not client_id) to the module. This removes a lookup to the API to
- translate the client_id into the client ID.
-
- - "Note that this module returns the client secret. To avoid this showing up in the logs,
- please add C(no_log: true) to the task."
-
+ - This module allows you to regenerate a Keycloak client secret using the Keycloak REST API. It requires access to the REST
+ API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default
+ Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+ - When regenerating a client secret, where possible provide the client's ID (not client_id) to the module. This removes
+ a lookup to the API to translate the client_id into the client ID.
+ - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to
+ the task.'
attributes:
check_mode:
support: full
diff_mode:
support: none
+ action_group:
+ version_added: 10.2.0
options:
realm:
@@ -48,14 +43,13 @@ options:
id:
description:
- The unique identifier for this client.
- - This parameter is not required for getting or generating a client secret but
- providing it will reduce the number of API calls required.
+ - This parameter is not required for getting or generating a client secret but providing it reduces the number of API
+ calls required.
type: str
client_id:
description:
- - The client_id of the client. Passing this instead of id results in an
- extra API call.
+ - The client_id of the client. Passing this instead of O(id) results in an extra API call.
aliases:
- clientId
type: str
@@ -63,14 +57,15 @@ options:
extends_documentation_fragment:
- community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
- community.general.attributes
author:
- Fynn Chen (@fynncfchen)
- John Cant (@johncant)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Regenerate a Keycloak client secret, authentication with credentials
community.general.keycloak_clientsecret_regenerate:
id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
@@ -102,16 +97,16 @@ EXAMPLES = '''
token: TOKEN
delegate_to: localhost
no_log: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
description: Message as to what action was taken.
returned: always
type: str
end_state:
- description: Representation of the client credential after module execution
+ description: Representation of the client credential after module execution.
returned: on success
type: complex
contains:
@@ -125,8 +120,7 @@ end_state:
type: str
returned: always
sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1
-
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
KeycloakAPI, KeycloakError, get_token)
diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py
index 7bffb5cbb6..ee357605f1 100644
--- a/plugins/modules/keycloak_clienttemplate.py
+++ b/plugins/modules/keycloak_clienttemplate.py
@@ -8,173 +8,165 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_clienttemplate
-short_description: Allows administration of Keycloak client templates via Keycloak API
+short_description: Allows administration of Keycloak client templates using Keycloak API
description:
- - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
- requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
-
- - The Keycloak API does not always enforce for only sensible settings to be used -- you can set
- SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
- If you do not specify a setting, usually a sensible default is chosen.
-
+ - This module allows the administration of Keycloak client templates using the Keycloak REST API. It requires access to
+ the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - The Keycloak API does not always enforce for only sensible settings to be used -- you can set SAML-specific settings on
+ an OpenID Connect client for instance and the other way around. Be careful. If you do not specify a setting, usually a
+ sensible default is chosen.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the client template.
- - On V(present), the client template will be created (or updated if it exists already).
- - On V(absent), the client template will be removed if it exists
- choices: ['present', 'absent']
- default: 'present'
- type: str
-
- id:
- description:
- - Id of client template to be worked on. This is usually a UUID.
- type: str
-
- realm:
- description:
- - Realm this client template is found in.
- type: str
- default: master
-
- name:
- description:
- - Name of the client template.
- type: str
-
+ state:
description:
+ - State of the client template.
+ - On V(present), the client template is created (or updated if it exists already).
+ - On V(absent), the client template is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - ID of client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template.
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak.
+ type: str
+
+ protocol:
+ description:
+ - Type of client template.
+ - The V(docker-v2) value was added in community.general 8.6.0.
+ choices: ['openid-connect', 'saml', 'docker-v2']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not. This is C(fullScopeAllowed) in the Keycloak
+ REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client template. This is C(protocolMappers) in the Keycloak REST
+ API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
description:
- - Description of the client template in Keycloak.
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
type: str
- protocol:
+ id:
description:
- - Type of client template.
- - The V(docker-v2) value was added in community.general 8.6.0.
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This specifies for which protocol this protocol mapper is active.
choices: ['openid-connect', 'saml', 'docker-v2']
type: str
- full_scope_allowed:
+ protocolMapper:
description:
- - Is the "Full Scope Allowed" feature set for this client template or not.
- This is 'fullScopeAllowed' in the Keycloak REST API.
- type: bool
+ - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide
+ since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:'
+ - V(docker-v2-allow-all-mapper).
+ - V(oidc-address-mapper).
+ - V(oidc-full-name-mapper).
+ - V(oidc-group-membership-mapper).
+ - V(oidc-hardcoded-claim-mapper).
+ - V(oidc-hardcoded-role-mapper).
+ - V(oidc-role-name-mapper).
+ - V(oidc-script-based-protocol-mapper).
+ - V(oidc-sha256-pairwise-sub-mapper).
+ - V(oidc-usermodel-attribute-mapper).
+ - V(oidc-usermodel-client-role-mapper).
+ - V(oidc-usermodel-property-mapper).
+ - V(oidc-usermodel-realm-role-mapper).
+ - V(oidc-usersessionmodel-note-mapper).
+ - V(saml-group-membership-mapper).
+ - V(saml-hardcode-attribute-mapper).
+ - V(saml-hardcode-role-mapper).
+ - V(saml-role-list-mapper).
+ - V(saml-role-name-mapper).
+ - V(saml-user-attribute-mapper).
+ - V(saml-user-property-mapper).
+ - V(saml-user-session-note-mapper).
+ - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to
+ Server Info -> Providers and looking under 'protocol-mapper'.
+ type: str
- protocol_mappers:
+ config:
description:
- - a list of dicts defining protocol mappers for this client template.
- This is 'protocolMappers' in the Keycloak REST API.
- type: list
- elements: dict
- suboptions:
- consentRequired:
- description:
- - Specifies whether a user needs to provide consent to a client for this mapper to be active.
- type: bool
-
- consentText:
- description:
- - The human-readable name of the consent the user is presented to accept.
- type: str
-
- id:
- description:
- - Usually a UUID specifying the internal ID of this protocol mapper instance.
- type: str
-
- name:
- description:
- - The name of this protocol mapper.
- type: str
-
- protocol:
- description:
- - This specifies for which protocol this protocol mapper is active.
- choices: ['openid-connect', 'saml', 'docker-v2']
- type: str
-
- protocolMapper:
- description:
- - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
- impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least:"
- - V(docker-v2-allow-all-mapper)
- - V(oidc-address-mapper)
- - V(oidc-full-name-mapper)
- - V(oidc-group-membership-mapper)
- - V(oidc-hardcoded-claim-mapper)
- - V(oidc-hardcoded-role-mapper)
- - V(oidc-role-name-mapper)
- - V(oidc-script-based-protocol-mapper)
- - V(oidc-sha256-pairwise-sub-mapper)
- - V(oidc-usermodel-attribute-mapper)
- - V(oidc-usermodel-client-role-mapper)
- - V(oidc-usermodel-property-mapper)
- - V(oidc-usermodel-realm-role-mapper)
- - V(oidc-usersessionmodel-note-mapper)
- - V(saml-group-membership-mapper)
- - V(saml-hardcode-attribute-mapper)
- - V(saml-hardcode-role-mapper)
- - V(saml-role-list-mapper)
- - V(saml-role-name-mapper)
- - V(saml-user-attribute-mapper)
- - V(saml-user-property-mapper)
- - V(saml-user-session-note-mapper)
- - An exhaustive list of available mappers on your installation can be obtained on
- the admin console by going to Server Info -> Providers and looking under
- 'protocol-mapper'.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
- other than by the source of the mappers and its parent class(es). An example is given
- below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the RV(existing) field.
- type: dict
-
- attributes:
- description:
- - A dict of further attributes for this client template. This can contain various
- configuration settings, though in the default installation of Keycloak as of 3.4, none
- are documented or known, so this is usually empty.
+ - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value
+ of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its
+ parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the RV(existing) field.
type: dict
-notes:
- - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled),
- C(implicitFlowEnabled), C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and
- C(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
- Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
- they are not available through this module.
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various configuration settings, though in
+ the default installation of Keycloak as of 3.4, none are documented or known, so this is usually empty.
+ type: dict
+notes:
+ - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled), C(implicitFlowEnabled),
+ C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and C(frontchannelLogout)) which, while available
+ with keycloak_client, do not have any effect on Keycloak client-templates and are discarded if supplied with an API request
+ changing client-templates. As such, they are not available through this module.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Eike Frost (@eikef)
-'''
+ - Eike Frost (@eikef)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create or update Keycloak client template (minimal), authentication with credentials
community.general.keycloak_client:
auth_client_id: admin-cli
@@ -233,47 +225,47 @@ EXAMPLES = '''
full_scope_allowed: false
id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Client template testclient has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
proposed:
- description: Representation of proposed client template.
- returned: always
- type: dict
- sample: {
- name: "test01"
- }
+ description: Representation of proposed client template.
+ returned: always
+ type: dict
+ sample: {"name": "test01"}
existing:
- description: Representation of existing client template (sample is truncated).
- returned: always
- type: dict
- sample: {
- "description": "test01",
- "fullScopeAllowed": false,
- "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
- "name": "test01",
- "protocol": "saml"
+ description: Representation of existing client template (sample is truncated).
+ returned: always
+ type: dict
+ sample:
+ {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
}
end_state:
- description: Representation of client template after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "description": "test01",
- "fullScopeAllowed": false,
- "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
- "name": "test01",
- "protocol": "saml"
+ description: Representation of client template after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample:
+ {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -316,8 +308,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py
new file mode 100644
index 0000000000..8b0c67b321
--- /dev/null
+++ b/plugins/modules/keycloak_component.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Björn Bösel
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: keycloak_component
+
+short_description: Allows administration of Keycloak components using Keycloak API
+
+version_added: 10.0.0
+
+description:
+ - This module allows the administration of Keycloak components using the Keycloak REST API. It requires access to the REST
+ API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default
+ Keycloak installation, C(admin-cli) and an C(admin) user would work, as would a separate realm definition with the scope
+ tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). Aliases are provided so camelCased versions can be
+ used as well.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
+
+options:
+ state:
+ description:
+ - State of the Keycloak component.
+ - On V(present), the component is created (or updated if it exists already).
+ - On V(absent), the component is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the component to create.
+ type: str
+ required: true
+ parent_id:
+ description:
+ - The parent_id of the component. In practice the ID (name) of the realm.
+ type: str
+ required: true
+ provider_id:
+ description:
+ - The name of the "provider ID" for the key.
+ type: str
+ required: true
+ provider_type:
+ description:
+ - The name of the "provider type" for the key. That is, V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.userprofile.UserProfileProvider),
+ ...
+ - See U(https://www.keycloak.org/docs/latest/server_development/index.html#_providers).
+ type: str
+ required: true
+ config:
+ description:
+ - Configuration properties for the provider.
+ - Contents vary depending on the provider type.
+ type: dict
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
+
+author:
+ - Björn Bösel (@fivetide)
+"""
+
+EXAMPLES = r"""
+- name: Manage Keycloak User Storage Provider
+ community.general.keycloak_component:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+ name: my storage provider
+ state: present
+ parent_id: some_realm
+ provider_id: my storage
+ provider_type: "org.keycloak.storage.UserStorageProvider"
+ config:
+ myCustomKey: "my_custom_key"
+ cachePolicy: "NO_CACHE"
+ enabled: true
+"""
+
+RETURN = r"""
+end_state:
+ description: Representation of the keycloak_component after module execution.
+ returned: on success
+ type: dict
+ contains:
+ id:
+ description: ID of the component.
+ type: str
+ returned: when O(state=present)
+ sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4
+ name:
+ description: Name of the component.
+ type: str
+ returned: when O(state=present)
+ sample: mykey
+ parentId:
+ description: ID of the realm this key belongs to.
+ type: str
+ returned: when O(state=present)
+ sample: myrealm
+ providerId:
+ description: The ID of the key provider.
+ type: str
+ returned: when O(state=present)
+ sample: rsa
+ providerType:
+ description: The type of provider.
+ type: str
+ returned: when O(state=present)
+ config:
+ description: Component configuration.
+ type: dict
+"""
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from copy import deepcopy
+
+
+def main():
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ parent_id=dict(type='str', required=True),
+ provider_id=dict(type='str', required=True),
+ provider_type=dict(type='str', required=True),
+ config=dict(
+ type='dict',
+ )
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
+
+ result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
+
+ # This will include the current state of the component if it is already
+ # present. This is only used for diff-mode.
+ before_component = {}
+ before_component['config'] = {}
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "parent_id"]
+
+ # Filter and map the parameters names that apply to the role
+ component_params = [x for x in module.params
+ if x not in params_to_ignore and
+ module.params.get(x) is not None]
+
+ provider_type = module.params.get("provider_type")
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+ changeset['config'] = {}
+
+ # Generate a JSON payload for Keycloak Admin API from the module
+ # parameters. Parameters that do not belong to the JSON payload (e.g.
+    # "state" or "auth_keycloak_url") have been filtered away earlier (see
+ # above).
+ #
+ # This loop converts Ansible module parameters (snake-case) into
+ # Keycloak-compatible format (camel-case). For example private_key
+ # becomes privateKey.
+ #
+ # It also converts bool, str and int parameters into lists with a single
+ # entry of 'str' type. Bool values are also lowercased. This is required
+ # by Keycloak.
+ #
+ for component_param in component_params:
+ if component_param == 'config':
+ for config_param in module.params.get('config'):
+ changeset['config'][camel(config_param)] = []
+ raw_value = module.params.get('config')[config_param]
+ if isinstance(raw_value, bool):
+ value = str(raw_value).lower()
+ else:
+ value = str(raw_value)
+
+ changeset['config'][camel(config_param)].append(value)
+ else:
+ # No need for camelcase in here as these are one word parameters
+ new_param_value = module.params.get(component_param)
+ changeset[camel(component_param)] = new_param_value
+
+    # Make a deep copy of the changeset. This is used when determining
+ # changes to the current state.
+ changeset_copy = deepcopy(changeset)
+
+ # Make it easier to refer to current module parameters
+ name = module.params.get('name')
+ force = module.params.get('force')
+ state = module.params.get('state')
+ enabled = module.params.get('enabled')
+ provider_id = module.params.get('provider_id')
+ provider_type = module.params.get('provider_type')
+ parent_id = module.params.get('parent_id')
+
+ # Get a list of all Keycloak components that are of keyprovider type.
+ current_components = kc.get_components(urlencode(dict(type=provider_type)), parent_id)
+
+ # If this component is present get its key ID. Confusingly the key ID is
+ # also known as the Provider ID.
+ component_id = None
+
+ # Track individual parameter changes
+ changes = ""
+
+ # This tells Ansible whether the key was changed (added, removed, modified)
+ result['changed'] = False
+
+ # Loop through the list of components. If we encounter a component whose
+ # name matches the value of the name parameter then assume the key is
+ # already present.
+ for component in current_components:
+ if component['name'] == name:
+ component_id = component['id']
+ changeset['id'] = component_id
+ changeset_copy['id'] = component_id
+
+ # Compare top-level parameters
+ for param, value in changeset.items():
+ before_component[param] = component[param]
+
+ if changeset_copy[param] != component[param] and param != 'config':
+ changes += "%s: %s -> %s, " % (param, component[param], changeset_copy[param])
+ result['changed'] = True
+ # Compare parameters under the "config" key
+ for p, v in changeset_copy['config'].items():
+ try:
+ before_component['config'][p] = component['config'][p] or []
+ except KeyError:
+ before_component['config'][p] = []
+ if changeset_copy['config'][p] != component['config'][p]:
+ changes += "config.%s: %s -> %s, " % (p, component['config'][p], changeset_copy['config'][p])
+ result['changed'] = True
+
+ # Check all the possible states of the resource and do what is needed to
+ # converge current state with desired state (create, update or delete
+ # the key).
+ if component_id and state == 'present':
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(before=before_component, after=changeset_copy)
+
+ if module.check_mode:
+ result['msg'] = "Component %s would be changed: %s" % (name, changes.strip(", "))
+ else:
+ kc.update_component(changeset, parent_id)
+ result['msg'] = "Component %s changed: %s" % (name, changes.strip(", "))
+ else:
+ result['msg'] = "Component %s was in sync" % (name)
+
+ result['end_state'] = changeset_copy
+ elif component_id and state == 'absent':
+ if module._diff:
+ result['diff'] = dict(before=before_component, after={})
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = "Component %s would be deleted" % (name)
+ else:
+ kc.delete_component(component_id, parent_id)
+ result['changed'] = True
+ result['msg'] = "Component %s deleted" % (name)
+
+ result['end_state'] = {}
+ elif not component_id and state == 'present':
+ if module._diff:
+ result['diff'] = dict(before={}, after=changeset_copy)
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = "Component %s would be created" % (name)
+ else:
+ kc.create_component(changeset, parent_id)
+ result['changed'] = True
+ result['msg'] = "Component %s created" % (name)
+
+ result['end_state'] = changeset_copy
+ elif not component_id and state == 'absent':
+ result['changed'] = False
+ result['msg'] = "Component %s not present" % (name)
+ result['end_state'] = {}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/keycloak_component_info.py b/plugins/modules/keycloak_component_info.py
index a788735d98..79a6d58720 100644
--- a/plugins/modules/keycloak_component_info.py
+++ b/plugins/modules/keycloak_component_info.py
@@ -8,100 +8,98 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_component_info
-short_description: Retrive component info in Keycloak
+short_description: Retrieve component info in Keycloak
version_added: 8.2.0
description:
- - This module retrive information on component from Keycloak.
+  - This module retrieves information on components from Keycloak.
+attributes:
+ action_group:
+ version_added: 10.2.0
+
options:
- realm:
- description:
- - The name of the realm.
- required: true
- type: str
- name:
- description:
- - Name of the Component.
- type: str
- provider_type:
- description:
- - Provider type of components.
- - "Example:
- V(org.keycloak.storage.UserStorageProvider),
- V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy),
- V(org.keycloak.keys.KeyProvider),
- V(org.keycloak.userprofile.UserProfileProvider),
- V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)."
- type: str
- parent_id:
- description:
- - Container ID of the components.
- type: str
+ realm:
+ description:
+ - The name of the realm.
+ required: true
+ type: str
+ name:
+ description:
+ - Name of the Component.
+ type: str
+ provider_type:
+ description:
+ - Provider type of components.
+ - 'Examples: V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy),
+ V(org.keycloak.keys.KeyProvider), V(org.keycloak.userprofile.UserProfileProvider), V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper).'
+ type: str
+ parent_id:
+ description:
+ - Container ID of the components.
+ type: str
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
author:
- - Andre Desrosiers (@desand01)
-'''
+ - Andre Desrosiers (@desand01)
+"""
-EXAMPLES = '''
- - name: Retrive info of a UserStorageProvider named myldap
- community.general.keycloak_component_info:
- auth_keycloak_url: http://localhost:8080/auth
- auth_sername: admin
- auth_password: password
- auth_realm: master
- realm: myrealm
- name: myldap
- provider_type: org.keycloak.storage.UserStorageProvider
+EXAMPLES = r"""
+- name: Retrieve info of a UserStorageProvider named myldap
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ name: myldap
+ provider_type: org.keycloak.storage.UserStorageProvider
- - name: Retrive key info component
- community.general.keycloak_component_info:
- auth_keycloak_url: http://localhost:8080/auth
- auth_sername: admin
- auth_password: password
- auth_realm: master
- realm: myrealm
- name: rsa-enc-generated
- provider_type: org.keycloak.keys.KeyProvider
+- name: Retrieve key info component
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ name: rsa-enc-generated
+ provider_type: org.keycloak.keys.KeyProvider
- - name: Retrive all component from realm master
- community.general.keycloak_component_info:
- auth_keycloak_url: http://localhost:8080/auth
- auth_sername: admin
- auth_password: password
- auth_realm: master
- realm: myrealm
+- name: Retrieve all components from realm master
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
- - name: Retrive all sub components of parent component filter by type
- community.general.keycloak_component_info:
- auth_keycloak_url: http://localhost:8080/auth
- auth_sername: admin
- auth_password: password
- auth_realm: master
- realm: myrealm
- parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2"
- provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+- name: Retrieve all sub components of parent component filtered by type
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+    auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2"
+ provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+"""
-
-'''
-
-RETURN = '''
+RETURN = r"""
components:
description: JSON representation of components.
returned: always
type: list
elements: dict
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py
index 5398a4b5d0..7053b33a35 100644
--- a/plugins/modules/keycloak_group.py
+++ b/plugins/modules/keycloak_group.py
@@ -8,119 +8,105 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_group
-short_description: Allows administration of Keycloak groups via Keycloak API
+short_description: Allows administration of Keycloak groups using Keycloak API
description:
- - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a group, where possible provide the group ID to the module. This removes a lookup
- to the API to translate the name into the group ID.
-
+ - This module allows you to add, remove or modify Keycloak groups using the Keycloak REST API. It requires access to the
+ REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In
+ a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the
+ scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
+ - When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate
+ the name into the group ID.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the group.
- - On V(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
- - >-
- On V(absent), the group will be removed if it exists. Be aware that absenting
- a group with subgroups will automatically delete all its subgroups too.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the group.
+ - On V(present), the group is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the group is removed if it exists. Be aware that absenting a group with subgroups automatically deletes
+ all its subgroups too.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- name:
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+ realm:
+ type: str
+ description:
+      - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but providing it reduces the number of API calls required.
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (for example a string) or a list of strings.
+ parents:
+ version_added: "6.4.0"
+ type: list
+ description:
+ - List of parent groups for the group to handle sorted top to bottom.
+ - Set this to create a group as a subgroup of another group or groups (parents) or when accessing an existing subgroup
+ by name.
+ - Not necessary to set when accessing an existing subgroup by its C(ID) because in that case the group can be directly
+ queried without necessarily knowing its parent(s).
+ elements: dict
+ suboptions:
+ id:
type: str
description:
- - Name of the group.
- - This parameter is required only when creating or updating the group.
-
- realm:
+ - Identify parent by ID.
+        - Needs fewer API calls than using O(parents[].name).
+ - A deep parent chain can be started at any point when first given parent is given as ID.
+        - Note that in principle both ID and name can be specified at the same time, but the current implementation always
+          uses just one of them, with ID being preferred.
+ name:
type: str
description:
- - They Keycloak realm under which this group resides.
- default: 'master'
-
- id:
- type: str
- description:
- - The unique identifier for this group.
- - This parameter is not required for updating or deleting a group but
- providing it will reduce the number of API calls required.
-
- attributes:
- type: dict
- description:
- - A dict of key/value pairs to set as custom attributes for the group.
- - Values may be single values (e.g. a string) or a list of strings.
-
- parents:
- version_added: "6.4.0"
- type: list
- description:
- - List of parent groups for the group to handle sorted top to bottom.
- - >-
- Set this to create a group as a subgroup of another group or groups (parents) or
- when accessing an existing subgroup by name.
- - >-
- Not necessary to set when accessing an existing subgroup by its C(ID) because in
- that case the group can be directly queried without necessarily knowing its parent(s).
- elements: dict
- suboptions:
- id:
- type: str
- description:
- - Identify parent by ID.
- - Needs less API calls than using O(parents[].name).
- - A deep parent chain can be started at any point when first given parent is given as ID.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
- name:
- type: str
- description:
- - Identify parent by name.
- - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
- - When giving a parent chain with only names it must be complete up to the top.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
-
+ - Identify parent by name.
+        - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood.
+ - When giving a parent chain with only names it must be complete up to the top.
+        - Note that in principle both ID and name can be specified at the same time, but the current implementation always
+          uses just one of them, with ID being preferred.
notes:
- - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the Keycloak API
- are read-only for groups. This limitation will be removed in a later version of this module.
-
+ - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the
+ Keycloak API are read-only for groups. This limitation will be removed in a later version of this module.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Adam Goossens (@adamgoossens)
-'''
+ - Adam Goossens (@adamgoossens)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a Keycloak group, authentication with credentials
community.general.keycloak_group:
name: my-new-kc-group
@@ -188,14 +174,14 @@ EXAMPLES = '''
auth_password: PASSWORD
name: my-new_group
attributes:
- attrib1: value1
- attrib2: value2
- attrib3:
- - with
- - numerous
- - individual
- - list
- - items
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
delegate_to: localhost
- name: Create a Keycloak subgroup of a base group (using parent name)
@@ -255,64 +241,63 @@ EXAMPLES = '''
parents:
- id: "{{ result_new_kcgrp_sub.end_state.id }}"
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the group after module execution (sample is truncated).
- returned: on success
- type: complex
- contains:
- id:
- description: GUID that identifies the group.
- type: str
- returned: always
- sample: 23f38145-3195-462c-97e7-97041ccea73e
- name:
- description: Name of the group.
- type: str
- returned: always
- sample: grp-test-123
- attributes:
- description: Attributes applied to this group.
- type: dict
- returned: always
- sample:
- attr1: ["val1", "val2", "val3"]
- path:
- description: URI path to the group.
- type: str
- returned: always
- sample: /grp-test-123
- realmRoles:
- description: An array of the realm-level roles granted to this group.
- type: list
- returned: always
- sample: []
- subGroups:
- description: A list of groups that are children of this group. These groups will have the same parameters as
- documented here.
- type: list
- returned: always
- clientRoles:
- description: A list of client-level roles granted to this group.
- type: list
- returned: always
- sample: []
- access:
- description: A dict describing the accesses you have to this group based on the credentials used.
- type: dict
- returned: always
- sample:
- manage: true
- manageMembership: true
- view: true
-'''
+ description: Representation of the group after module execution (sample is truncated).
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group.
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group.
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group.
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group.
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group.
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups have the same parameters as documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group.
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -347,8 +332,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, group='')
@@ -369,7 +356,7 @@ def main():
parents = module.params.get('parents')
# attributes in Keycloak have their values returned as lists
- # via the API. attributes is a dict, so we'll transparently convert
+ # using the API. attributes is a dict, so we'll transparently convert
# the values to lists.
if attributes is not None:
for key, val in module.params['attributes'].items():
diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py
index 609673653b..40a06846d6 100644
--- a/plugins/modules/keycloak_identity_provider.py
+++ b/plugins/modules/keycloak_identity_provider.py
@@ -8,282 +8,281 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_identity_provider
-short_description: Allows administration of Keycloak identity providers via Keycloak API
+short_description: Allows administration of Keycloak identity providers using Keycloak API
version_added: 3.6.0
description:
- - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
-
+ - This module allows you to add, remove or modify Keycloak identity providers using the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the identity provider.
- - On V(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the identity provider will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the identity provider.
+ - On V(present), the identity provider is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the identity provider is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- realm:
- description:
- - The Keycloak realm under which this identity provider resides.
- default: 'master'
- type: str
+ realm:
+ description:
+ - The Keycloak realm under which this identity provider resides.
+ default: 'master'
+ type: str
- alias:
- description:
- - The alias uniquely identifies an identity provider and it is also used to build the redirect URI.
- required: true
- type: str
+ alias:
+ description:
+ - The alias uniquely identifies an identity provider and it is also used to build the redirect URI.
+ required: true
+ type: str
- display_name:
+ display_name:
+ description:
+ - Friendly name for identity provider.
+ aliases:
+ - displayName
+ type: str
+
+ enabled:
+ description:
+ - Enable/disable this identity provider.
+ type: bool
+
+ store_token:
+ description:
+ - Enable/disable whether tokens must be stored after authenticating users.
+ aliases:
+ - storeToken
+ type: bool
+
+ add_read_token_role_on_create:
+ description:
+ - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role.
+ aliases:
+ - addReadTokenRoleOnCreate
+ type: bool
+
+ trust_email:
+ description:
+ - If enabled, email provided by this provider is not verified even if verification is enabled for the realm.
+ aliases:
+ - trustEmail
+ type: bool
+
+ link_only:
+ description:
+ - If true, users cannot log in through this provider. They can only link to this provider. This is useful if you do
+ not want to allow login from the provider, but want to integrate with a provider.
+ aliases:
+ - linkOnly
+ type: bool
+
+ first_broker_login_flow_alias:
+ description:
+ - Alias of authentication flow, which is triggered after first login with this identity provider.
+ aliases:
+ - firstBrokerLoginFlowAlias
+ type: str
+
+ post_broker_login_flow_alias:
+ description:
+ - Alias of authentication flow, which is triggered after each login with this identity provider.
+ aliases:
+ - postBrokerLoginFlowAlias
+ type: str
+
+ authenticate_by_default:
+ description:
+ - Specifies if this identity provider should be used by default for authentication even before displaying login screen.
+ aliases:
+ - authenticateByDefault
+ type: bool
+
+ provider_id:
+ description:
+ - Protocol used by this provider (supported values are V(oidc) or V(saml)).
+ aliases:
+ - providerId
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id).
+ Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing
+ identity provider configuration through check-mode in the RV(existing) field.
+ type: dict
+ suboptions:
+ hide_on_login_page:
description:
- - Friendly name for identity provider.
+ - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint)
+ parameter.
aliases:
- - displayName
- type: str
-
- enabled:
- description:
- - Enable/disable this identity provider.
+ - hideOnLoginPage
type: bool
- store_token:
+ gui_order:
description:
- - Enable/disable whether tokens must be stored after authenticating users.
+ - Number defining order of the provider in GUI (for example, on Login page).
aliases:
- - storeToken
- type: bool
+ - guiOrder
+ type: int
- add_read_token_role_on_create:
+ sync_mode:
description:
- - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role.
+ - Default sync mode for all mappers. The sync mode determines when user data is synced using the mappers.
aliases:
- - addReadTokenRoleOnCreate
- type: bool
-
- trust_email:
- description:
- - If enabled, email provided by this provider is not verified even if verification is enabled for the realm.
- aliases:
- - trustEmail
- type: bool
-
- link_only:
- description:
- - If true, users cannot log in through this provider. They can only link to this provider.
- This is useful if you don't want to allow login from the provider, but want to integrate with a provider.
- aliases:
- - linkOnly
- type: bool
-
- first_broker_login_flow_alias:
- description:
- - Alias of authentication flow, which is triggered after first login with this identity provider.
- aliases:
- - firstBrokerLoginFlowAlias
+ - syncMode
type: str
- post_broker_login_flow_alias:
+ issuer:
description:
- - Alias of authentication flow, which is triggered after each login with this identity provider.
- aliases:
- - postBrokerLoginFlowAlias
+ - The issuer identifier for the issuer of the response. If not provided, no validation is performed.
type: str
- authenticate_by_default:
+ authorizationUrl:
description:
- - Specifies if this identity provider should be used by default for authentication even before displaying login screen.
- aliases:
- - authenticateByDefault
+ - The Authorization URL.
+ type: str
+
+ tokenUrl:
+ description:
+ - The Token URL.
+ type: str
+
+ logoutUrl:
+ description:
+ - End session endpoint to use to logout user from external IDP.
+ type: str
+
+ userInfoUrl:
+ description:
+ - The User Info URL.
+ type: str
+
+ clientAuthMethod:
+ description:
+ - The client authentication method.
+ type: str
+
+ clientId:
+ description:
+ - The client or client identifier registered within the identity provider.
+ type: str
+
+ clientSecret:
+ description:
+ - The client or client secret registered within the identity provider.
+ type: str
+
+ defaultScope:
+ description:
+ - The scopes to be sent when asking for authorization.
+ type: str
+
+ validateSignature:
+ description:
+ - Enable/disable signature validation of external IDP signatures.
type: bool
- provider_id:
+ useJwksUrl:
description:
- - Protocol used by this provider (supported values are V(oidc) or V(saml)).
- aliases:
- - providerId
+ - If V(true), identity provider public keys are downloaded from given JWKS URL.
+ type: bool
+
+ jwksUrl:
+ description:
+ - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
type: str
- config:
+ entityId:
description:
- - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id).
- Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing
- identity provider configuration through check-mode in the RV(existing) field.
+ - The Entity ID that is used to uniquely identify this SAML Service Provider.
+ type: str
+
+ singleSignOnServiceUrl:
+ description:
+ - The URL that must be used to send authentication requests (SAML AuthnRequest).
+ type: str
+
+ singleLogoutServiceUrl:
+ description:
+ - The URL that must be used to send logout requests.
+ type: str
+
+ backchannelSupported:
+ description:
+ - Does the external IDP support backchannel logout?
+ type: str
+
+ nameIDPolicyFormat:
+ description:
+ - Specifies the URI reference corresponding to a name identifier format.
+ type: str
+
+ principalType:
+ description:
+ - Way to identify and track external users from the assertion.
+ type: str
+
+ mappers:
+ description:
+ - A list of dicts defining mappers associated with this Identity Provider.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - Unique ID of this mapper.
+ type: str
+
+ name:
+ description:
+ - Name of the mapper.
+ type: str
+
+ identityProviderAlias:
+ description:
+ - Alias of the identity provider for this mapper.
+ type: str
+
+ identityProviderMapper:
+ description:
+ - Type of mapper.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the mapper; the contents differ depending on the value of O(mappers[].identityProviderMapper).
type: dict
- suboptions:
- hide_on_login_page:
- description:
- - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter.
- aliases:
- - hideOnLoginPage
- type: bool
-
- gui_order:
- description:
- - Number defining order of the provider in GUI (for example, on Login page).
- aliases:
- - guiOrder
- type: int
-
- sync_mode:
- description:
- - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers.
- aliases:
- - syncMode
- type: str
-
- issuer:
- description:
- - The issuer identifier for the issuer of the response. If not provided, no validation will be performed.
- type: str
-
- authorizationUrl:
- description:
- - The Authorization URL.
- type: str
-
- tokenUrl:
- description:
- - The Token URL.
- type: str
-
- logoutUrl:
- description:
- - End session endpoint to use to logout user from external IDP.
- type: str
-
- userInfoUrl:
- description:
- - The User Info URL.
- type: str
-
- clientAuthMethod:
- description:
- - The client authentication method.
- type: str
-
- clientId:
- description:
- - The client or client identifier registered within the identity provider.
- type: str
-
- clientSecret:
- description:
- - The client or client secret registered within the identity provider.
- type: str
-
- defaultScope:
- description:
- - The scopes to be sent when asking for authorization.
- type: str
-
- validateSignature:
- description:
- - Enable/disable signature validation of external IDP signatures.
- type: bool
-
- useJwksUrl:
- description:
- - If the switch is on, identity provider public keys will be downloaded from given JWKS URL.
- type: bool
-
- jwksUrl:
- description:
- - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
- type: str
-
- entityId:
- description:
- - The Entity ID that will be used to uniquely identify this SAML Service Provider.
- type: str
-
- singleSignOnServiceUrl:
- description:
- - The URL that must be used to send authentication requests (SAML AuthnRequest).
- type: str
-
- singleLogoutServiceUrl:
- description:
- - The URL that must be used to send logout requests.
- type: str
-
- backchannelSupported:
- description:
- - Does the external IDP support backchannel logout?
- type: str
-
- nameIDPolicyFormat:
- description:
- - Specifies the URI reference corresponding to a name identifier format.
- type: str
-
- principalType:
- description:
- - Way to identify and track external users from the assertion.
- type: str
-
- mappers:
- description:
- - A list of dicts defining mappers associated with this Identity Provider.
- type: list
- elements: dict
- suboptions:
- id:
- description:
- - Unique ID of this mapper.
- type: str
-
- name:
- description:
- - Name of the mapper.
- type: str
-
- identityProviderAlias:
- description:
- - Alias of the identity provider for this mapper.
- type: str
-
- identityProviderMapper:
- description:
- - Type of mapper.
- type: str
-
- config:
- description:
- - Dict specifying the configuration options for the mapper; the contents differ depending on the value of
- O(mappers[].identityProviderMapper).
- type: dict
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Laurent Paumier (@laurpaum)
-'''
+ - Laurent Paumier (@laurpaum)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create OIDC identity provider, authentication with credentials
community.general.keycloak_identity_provider:
state: present
@@ -344,88 +343,91 @@ EXAMPLES = '''
attribute.friendly.name: User Roles
attribute.name: roles
syncMode: INHERIT
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Identity provider my-idp has been created"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Identity provider my-idp has been created"
proposed:
- description: Representation of proposed identity provider.
- returned: always
- type: dict
- sample: {
- "config": {
- "authorizationUrl": "https://idp.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "secret",
- "issuer": "https://idp.example.com",
- "tokenUrl": "https://idp.example.com/token",
- "userInfoUrl": "https://idp.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "providerId": "oidc"
+ description: Representation of proposed identity provider.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "secret",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "providerId": "oidc"
}
existing:
- description: Representation of existing identity provider.
- returned: always
- type: dict
- sample: {
- "addReadTokenRoleOnCreate": false,
- "alias": "my-idp",
- "authenticateByDefault": false,
- "config": {
- "authorizationUrl": "https://old.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "**********",
- "issuer": "https://old.example.com",
- "syncMode": "FORCE",
- "tokenUrl": "https://old.example.com/token",
- "userInfoUrl": "https://old.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "enabled": true,
- "firstBrokerLoginFlowAlias": "first broker login",
- "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
- "linkOnly": false,
- "providerId": "oidc",
- "storeToken": false,
- "trustEmail": false,
+ description: Representation of existing identity provider.
+ returned: always
+ type: dict
+ sample:
+ {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://old.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://old.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://old.example.com/token",
+ "userInfoUrl": "https://old.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false
}
end_state:
- description: Representation of identity provider after module execution.
- returned: on success
- type: dict
- sample: {
- "addReadTokenRoleOnCreate": false,
- "alias": "my-idp",
- "authenticateByDefault": false,
- "config": {
- "authorizationUrl": "https://idp.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "**********",
- "issuer": "https://idp.example.com",
- "tokenUrl": "https://idp.example.com/token",
- "userInfoUrl": "https://idp.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "enabled": true,
- "firstBrokerLoginFlowAlias": "first broker login",
- "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
- "linkOnly": false,
- "providerId": "oidc",
- "storeToken": false,
- "trustEmail": false,
+ description: Representation of identity provider after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -497,8 +499,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py
index 9bbcdb6b1a..c8bc7dc7df 100644
--- a/plugins/modules/keycloak_realm.py
+++ b/plugins/modules/keycloak_realm.py
@@ -9,513 +9,518 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_realm
-short_description: Allows administration of Keycloak realm via Keycloak API
+short_description: Allows administration of Keycloak realm using Keycloak API
version_added: 3.0.0
description:
- - This module allows the administration of Keycloak realm via the Keycloak REST API. It
- requires access to the REST API via OpenID Connect; the user connecting and the realm being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- Aliases are provided so camelCased versions can be used as well.
-
- - The Keycloak API does not always sanity check inputs e.g. you can set
- SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
- If you do not specify a setting, usually a sensible default is chosen.
-
+ - This module allows the administration of Keycloak realm using the Keycloak REST API. It requires access to the REST API
+ using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default
+ Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used
+ as well.
+ - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect
+ client and also the other way around. B(Be careful). If you do not specify a setting, usually a sensible default is
+ chosen.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the realm.
- - On V(present), the realm will be created (or updated if it exists already).
- - On V(absent), the realm will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
+ state:
+ description:
+ - State of the realm.
+ - On V(present), the realm is created (or updated if it exists already).
+ - On V(absent), the realm is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
- id:
- description:
- - The realm to create.
- type: str
- realm:
- description:
- - The realm name.
- type: str
- access_code_lifespan:
- description:
- - The realm access code lifespan.
- aliases:
- - accessCodeLifespan
- type: int
- access_code_lifespan_login:
- description:
- - The realm access code lifespan login.
- aliases:
- - accessCodeLifespanLogin
- type: int
- access_code_lifespan_user_action:
- description:
- - The realm access code lifespan user action.
- aliases:
- - accessCodeLifespanUserAction
- type: int
- access_token_lifespan:
- description:
- - The realm access token lifespan.
- aliases:
- - accessTokenLifespan
- type: int
- access_token_lifespan_for_implicit_flow:
- description:
- - The realm access token lifespan for implicit flow.
- aliases:
- - accessTokenLifespanForImplicitFlow
- type: int
- account_theme:
- description:
- - The realm account theme.
- aliases:
- - accountTheme
- type: str
- action_token_generated_by_admin_lifespan:
- description:
- - The realm action token generated by admin lifespan.
- aliases:
- - actionTokenGeneratedByAdminLifespan
- type: int
- action_token_generated_by_user_lifespan:
- description:
- - The realm action token generated by user lifespan.
- aliases:
- - actionTokenGeneratedByUserLifespan
- type: int
- admin_events_details_enabled:
- description:
- - The realm admin events details enabled.
- aliases:
- - adminEventsDetailsEnabled
- type: bool
- admin_events_enabled:
- description:
- - The realm admin events enabled.
- aliases:
- - adminEventsEnabled
- type: bool
- admin_theme:
- description:
- - The realm admin theme.
- aliases:
- - adminTheme
- type: str
- attributes:
- description:
- - The realm attributes.
- type: dict
- browser_flow:
- description:
- - The realm browser flow.
- aliases:
- - browserFlow
- type: str
- browser_security_headers:
- description:
- - The realm browser security headers.
- aliases:
- - browserSecurityHeaders
- type: dict
- brute_force_protected:
- description:
- - The realm brute force protected.
- aliases:
- - bruteForceProtected
- type: bool
- client_authentication_flow:
- description:
- - The realm client authentication flow.
- aliases:
- - clientAuthenticationFlow
- type: str
- client_scope_mappings:
- description:
- - The realm client scope mappings.
- aliases:
- - clientScopeMappings
- type: dict
- default_default_client_scopes:
- description:
- - The realm default default client scopes.
- aliases:
- - defaultDefaultClientScopes
- type: list
- elements: str
- default_groups:
- description:
- - The realm default groups.
- aliases:
- - defaultGroups
- type: list
- elements: str
- default_locale:
- description:
- - The realm default locale.
- aliases:
- - defaultLocale
- type: str
- default_optional_client_scopes:
- description:
- - The realm default optional client scopes.
- aliases:
- - defaultOptionalClientScopes
- type: list
- elements: str
- default_roles:
- description:
- - The realm default roles.
- aliases:
- - defaultRoles
- type: list
- elements: str
- default_signature_algorithm:
- description:
- - The realm default signature algorithm.
- aliases:
- - defaultSignatureAlgorithm
- type: str
- direct_grant_flow:
- description:
- - The realm direct grant flow.
- aliases:
- - directGrantFlow
- type: str
- display_name:
- description:
- - The realm display name.
- aliases:
- - displayName
- type: str
- display_name_html:
- description:
- - The realm display name HTML.
- aliases:
- - displayNameHtml
- type: str
- docker_authentication_flow:
- description:
- - The realm docker authentication flow.
- aliases:
- - dockerAuthenticationFlow
- type: str
- duplicate_emails_allowed:
- description:
- - The realm duplicate emails allowed option.
- aliases:
- - duplicateEmailsAllowed
- type: bool
- edit_username_allowed:
- description:
- - The realm edit username allowed option.
- aliases:
- - editUsernameAllowed
- type: bool
- email_theme:
- description:
- - The realm email theme.
- aliases:
- - emailTheme
- type: str
- enabled:
- description:
- - The realm enabled option.
- type: bool
- enabled_event_types:
- description:
- - The realm enabled event types.
- aliases:
- - enabledEventTypes
- type: list
- elements: str
- events_enabled:
- description:
- - Enables or disables login events for this realm.
- aliases:
- - eventsEnabled
- type: bool
- version_added: 3.6.0
- events_expiration:
- description:
- - The realm events expiration.
- aliases:
- - eventsExpiration
- type: int
- events_listeners:
- description:
- - The realm events listeners.
- aliases:
- - eventsListeners
- type: list
- elements: str
- failure_factor:
- description:
- - The realm failure factor.
- aliases:
- - failureFactor
- type: int
- internationalization_enabled:
- description:
- - The realm internationalization enabled option.
- aliases:
- - internationalizationEnabled
- type: bool
- login_theme:
- description:
- - The realm login theme.
- aliases:
- - loginTheme
- type: str
- login_with_email_allowed:
- description:
- - The realm login with email allowed option.
- aliases:
- - loginWithEmailAllowed
- type: bool
- max_delta_time_seconds:
- description:
- - The realm max delta time in seconds.
- aliases:
- - maxDeltaTimeSeconds
- type: int
- max_failure_wait_seconds:
- description:
- - The realm max failure wait in seconds.
- aliases:
- - maxFailureWaitSeconds
- type: int
- minimum_quick_login_wait_seconds:
- description:
- - The realm minimum quick login wait in seconds.
- aliases:
- - minimumQuickLoginWaitSeconds
- type: int
- not_before:
- description:
- - The realm not before.
- aliases:
- - notBefore
- type: int
- offline_session_idle_timeout:
- description:
- - The realm offline session idle timeout.
- aliases:
- - offlineSessionIdleTimeout
- type: int
- offline_session_max_lifespan:
- description:
- - The realm offline session max lifespan.
- aliases:
- - offlineSessionMaxLifespan
- type: int
- offline_session_max_lifespan_enabled:
- description:
- - The realm offline session max lifespan enabled option.
- aliases:
- - offlineSessionMaxLifespanEnabled
- type: bool
- otp_policy_algorithm:
- description:
- - The realm otp policy algorithm.
- aliases:
- - otpPolicyAlgorithm
- type: str
- otp_policy_digits:
- description:
- - The realm otp policy digits.
- aliases:
- - otpPolicyDigits
- type: int
- otp_policy_initial_counter:
- description:
- - The realm otp policy initial counter.
- aliases:
- - otpPolicyInitialCounter
- type: int
- otp_policy_look_ahead_window:
- description:
- - The realm otp policy look ahead window.
- aliases:
- - otpPolicyLookAheadWindow
- type: int
- otp_policy_period:
- description:
- - The realm otp policy period.
- aliases:
- - otpPolicyPeriod
- type: int
- otp_policy_type:
- description:
- - The realm otp policy type.
- aliases:
- - otpPolicyType
- type: str
- otp_supported_applications:
- description:
- - The realm otp supported applications.
- aliases:
- - otpSupportedApplications
- type: list
- elements: str
- password_policy:
- description:
- - The realm password policy.
- aliases:
- - passwordPolicy
- type: str
- permanent_lockout:
- description:
- - The realm permanent lockout.
- aliases:
- - permanentLockout
- type: bool
- quick_login_check_milli_seconds:
- description:
- - The realm quick login check in milliseconds.
- aliases:
- - quickLoginCheckMilliSeconds
- type: int
- refresh_token_max_reuse:
- description:
- - The realm refresh token max reuse.
- aliases:
- - refreshTokenMaxReuse
- type: int
- registration_allowed:
- description:
- - The realm registration allowed option.
- aliases:
- - registrationAllowed
- type: bool
- registration_email_as_username:
- description:
- - The realm registration email as username option.
- aliases:
- - registrationEmailAsUsername
- type: bool
- registration_flow:
- description:
- - The realm registration flow.
- aliases:
- - registrationFlow
- type: str
- remember_me:
- description:
- - The realm remember me option.
- aliases:
- - rememberMe
- type: bool
- reset_credentials_flow:
- description:
- - The realm reset credentials flow.
- aliases:
- - resetCredentialsFlow
- type: str
- reset_password_allowed:
- description:
- - The realm reset password allowed option.
- aliases:
- - resetPasswordAllowed
- type: bool
- revoke_refresh_token:
- description:
- - The realm revoke refresh token option.
- aliases:
- - revokeRefreshToken
- type: bool
- smtp_server:
- description:
- - The realm smtp server.
- aliases:
- - smtpServer
- type: dict
- ssl_required:
- description:
- - The realm ssl required option.
- choices: ['all', 'external', 'none']
- aliases:
- - sslRequired
- type: str
- sso_session_idle_timeout:
- description:
- - The realm sso session idle timeout.
- aliases:
- - ssoSessionIdleTimeout
- type: int
- sso_session_idle_timeout_remember_me:
- description:
- - The realm sso session idle timeout remember me.
- aliases:
- - ssoSessionIdleTimeoutRememberMe
- type: int
- sso_session_max_lifespan:
- description:
- - The realm sso session max lifespan.
- aliases:
- - ssoSessionMaxLifespan
- type: int
- sso_session_max_lifespan_remember_me:
- description:
- - The realm sso session max lifespan remember me.
- aliases:
- - ssoSessionMaxLifespanRememberMe
- type: int
- supported_locales:
- description:
- - The realm supported locales.
- aliases:
- - supportedLocales
- type: list
- elements: str
- user_managed_access_allowed:
- description:
- - The realm user managed access allowed option.
- aliases:
- - userManagedAccessAllowed
- type: bool
- verify_email:
- description:
- - The realm verify email option.
- aliases:
- - verifyEmail
- type: bool
- wait_increment_seconds:
- description:
- - The realm wait increment in seconds.
- aliases:
- - waitIncrementSeconds
- type: int
+ id:
+ description:
+ - The realm to create.
+ type: str
+ realm:
+ description:
+ - The realm name.
+ type: str
+ access_code_lifespan:
+ description:
+ - The realm access code lifespan.
+ aliases:
+ - accessCodeLifespan
+ type: int
+ access_code_lifespan_login:
+ description:
+ - The realm access code lifespan login.
+ aliases:
+ - accessCodeLifespanLogin
+ type: int
+ access_code_lifespan_user_action:
+ description:
+ - The realm access code lifespan user action.
+ aliases:
+ - accessCodeLifespanUserAction
+ type: int
+ access_token_lifespan:
+ description:
+ - The realm access token lifespan.
+ aliases:
+ - accessTokenLifespan
+ type: int
+ access_token_lifespan_for_implicit_flow:
+ description:
+ - The realm access token lifespan for implicit flow.
+ aliases:
+ - accessTokenLifespanForImplicitFlow
+ type: int
+ account_theme:
+ description:
+ - The realm account theme.
+ aliases:
+ - accountTheme
+ type: str
+ action_token_generated_by_admin_lifespan:
+ description:
+ - The realm action token generated by admin lifespan.
+ aliases:
+ - actionTokenGeneratedByAdminLifespan
+ type: int
+ action_token_generated_by_user_lifespan:
+ description:
+ - The realm action token generated by user lifespan.
+ aliases:
+ - actionTokenGeneratedByUserLifespan
+ type: int
+ admin_events_details_enabled:
+ description:
+ - The realm admin events details enabled.
+ aliases:
+ - adminEventsDetailsEnabled
+ type: bool
+ admin_events_enabled:
+ description:
+ - The realm admin events enabled.
+ aliases:
+ - adminEventsEnabled
+ type: bool
+ admin_theme:
+ description:
+ - The realm admin theme.
+ aliases:
+ - adminTheme
+ type: str
+ attributes:
+ description:
+ - The realm attributes.
+ type: dict
+ browser_flow:
+ description:
+ - The realm browser flow.
+ aliases:
+ - browserFlow
+ type: str
+ browser_security_headers:
+ description:
+ - The realm browser security headers.
+ aliases:
+ - browserSecurityHeaders
+ type: dict
+ brute_force_protected:
+ description:
+ - The realm brute force protected.
+ aliases:
+ - bruteForceProtected
+ type: bool
+ client_authentication_flow:
+ description:
+ - The realm client authentication flow.
+ aliases:
+ - clientAuthenticationFlow
+ type: str
+ client_scope_mappings:
+ description:
+ - The realm client scope mappings.
+ aliases:
+ - clientScopeMappings
+ type: dict
+ default_default_client_scopes:
+ description:
+ - The realm default default client scopes.
+ aliases:
+ - defaultDefaultClientScopes
+ type: list
+ elements: str
+ default_groups:
+ description:
+ - The realm default groups.
+ aliases:
+ - defaultGroups
+ type: list
+ elements: str
+ default_locale:
+ description:
+ - The realm default locale.
+ aliases:
+ - defaultLocale
+ type: str
+ default_optional_client_scopes:
+ description:
+ - The realm default optional client scopes.
+ aliases:
+ - defaultOptionalClientScopes
+ type: list
+ elements: str
+ default_roles:
+ description:
+ - The realm default roles.
+ aliases:
+ - defaultRoles
+ type: list
+ elements: str
+ default_signature_algorithm:
+ description:
+ - The realm default signature algorithm.
+ aliases:
+ - defaultSignatureAlgorithm
+ type: str
+ direct_grant_flow:
+ description:
+ - The realm direct grant flow.
+ aliases:
+ - directGrantFlow
+ type: str
+ display_name:
+ description:
+ - The realm display name.
+ aliases:
+ - displayName
+ type: str
+ display_name_html:
+ description:
+ - The realm display name HTML.
+ aliases:
+ - displayNameHtml
+ type: str
+ docker_authentication_flow:
+ description:
+ - The realm docker authentication flow.
+ aliases:
+ - dockerAuthenticationFlow
+ type: str
+ duplicate_emails_allowed:
+ description:
+ - The realm duplicate emails allowed option.
+ aliases:
+ - duplicateEmailsAllowed
+ type: bool
+ edit_username_allowed:
+ description:
+ - The realm edit username allowed option.
+ aliases:
+ - editUsernameAllowed
+ type: bool
+ email_theme:
+ description:
+ - The realm email theme.
+ aliases:
+ - emailTheme
+ type: str
+ enabled:
+ description:
+ - The realm enabled option.
+ type: bool
+ enabled_event_types:
+ description:
+ - The realm enabled event types.
+ aliases:
+ - enabledEventTypes
+ type: list
+ elements: str
+ events_enabled:
+ description:
+ - Enables or disables login events for this realm.
+ aliases:
+ - eventsEnabled
+ type: bool
+ version_added: 3.6.0
+ events_expiration:
+ description:
+ - The realm events expiration.
+ aliases:
+ - eventsExpiration
+ type: int
+ events_listeners:
+ description:
+ - The realm events listeners.
+ aliases:
+ - eventsListeners
+ type: list
+ elements: str
+ failure_factor:
+ description:
+ - The realm failure factor.
+ aliases:
+ - failureFactor
+ type: int
+ internationalization_enabled:
+ description:
+ - The realm internationalization enabled option.
+ aliases:
+ - internationalizationEnabled
+ type: bool
+ login_theme:
+ description:
+ - The realm login theme.
+ aliases:
+ - loginTheme
+ type: str
+ login_with_email_allowed:
+ description:
+ - The realm login with email allowed option.
+ aliases:
+ - loginWithEmailAllowed
+ type: bool
+ max_delta_time_seconds:
+ description:
+ - The realm max delta time in seconds.
+ aliases:
+ - maxDeltaTimeSeconds
+ type: int
+ max_failure_wait_seconds:
+ description:
+ - The realm max failure wait in seconds.
+ aliases:
+ - maxFailureWaitSeconds
+ type: int
+ minimum_quick_login_wait_seconds:
+ description:
+ - The realm minimum quick login wait in seconds.
+ aliases:
+ - minimumQuickLoginWaitSeconds
+ type: int
+ not_before:
+ description:
+ - The realm not before.
+ aliases:
+ - notBefore
+ type: int
+ offline_session_idle_timeout:
+ description:
+ - The realm offline session idle timeout.
+ aliases:
+ - offlineSessionIdleTimeout
+ type: int
+ offline_session_max_lifespan:
+ description:
+ - The realm offline session max lifespan.
+ aliases:
+ - offlineSessionMaxLifespan
+ type: int
+ offline_session_max_lifespan_enabled:
+ description:
+ - The realm offline session max lifespan enabled option.
+ aliases:
+ - offlineSessionMaxLifespanEnabled
+ type: bool
+ otp_policy_algorithm:
+ description:
+ - The realm otp policy algorithm.
+ aliases:
+ - otpPolicyAlgorithm
+ type: str
+ otp_policy_digits:
+ description:
+ - The realm otp policy digits.
+ aliases:
+ - otpPolicyDigits
+ type: int
+ otp_policy_initial_counter:
+ description:
+ - The realm otp policy initial counter.
+ aliases:
+ - otpPolicyInitialCounter
+ type: int
+ otp_policy_look_ahead_window:
+ description:
+ - The realm otp policy look ahead window.
+ aliases:
+ - otpPolicyLookAheadWindow
+ type: int
+ otp_policy_period:
+ description:
+ - The realm otp policy period.
+ aliases:
+ - otpPolicyPeriod
+ type: int
+ otp_policy_type:
+ description:
+ - The realm otp policy type.
+ aliases:
+ - otpPolicyType
+ type: str
+ otp_supported_applications:
+ description:
+ - The realm otp supported applications.
+ aliases:
+ - otpSupportedApplications
+ type: list
+ elements: str
+ password_policy:
+ description:
+ - The realm password policy.
+ aliases:
+ - passwordPolicy
+ type: str
+ organizations_enabled:
+ description:
+ - Enables support for experimental organization feature.
+ aliases:
+ - organizationsEnabled
+ type: bool
+ version_added: 10.0.0
+ permanent_lockout:
+ description:
+ - The realm permanent lockout.
+ aliases:
+ - permanentLockout
+ type: bool
+ quick_login_check_milli_seconds:
+ description:
+ - The realm quick login check in milliseconds.
+ aliases:
+ - quickLoginCheckMilliSeconds
+ type: int
+ refresh_token_max_reuse:
+ description:
+ - The realm refresh token max reuse.
+ aliases:
+ - refreshTokenMaxReuse
+ type: int
+ registration_allowed:
+ description:
+ - The realm registration allowed option.
+ aliases:
+ - registrationAllowed
+ type: bool
+ registration_email_as_username:
+ description:
+ - The realm registration email as username option.
+ aliases:
+ - registrationEmailAsUsername
+ type: bool
+ registration_flow:
+ description:
+ - The realm registration flow.
+ aliases:
+ - registrationFlow
+ type: str
+ remember_me:
+ description:
+ - The realm remember me option.
+ aliases:
+ - rememberMe
+ type: bool
+ reset_credentials_flow:
+ description:
+ - The realm reset credentials flow.
+ aliases:
+ - resetCredentialsFlow
+ type: str
+ reset_password_allowed:
+ description:
+ - The realm reset password allowed option.
+ aliases:
+ - resetPasswordAllowed
+ type: bool
+ revoke_refresh_token:
+ description:
+ - The realm revoke refresh token option.
+ aliases:
+ - revokeRefreshToken
+ type: bool
+ smtp_server:
+ description:
+ - The realm smtp server.
+ aliases:
+ - smtpServer
+ type: dict
+ ssl_required:
+ description:
+ - The realm ssl required option.
+ choices: ['all', 'external', 'none']
+ aliases:
+ - sslRequired
+ type: str
+ sso_session_idle_timeout:
+ description:
+ - The realm sso session idle timeout.
+ aliases:
+ - ssoSessionIdleTimeout
+ type: int
+ sso_session_idle_timeout_remember_me:
+ description:
+ - The realm sso session idle timeout remember me.
+ aliases:
+ - ssoSessionIdleTimeoutRememberMe
+ type: int
+ sso_session_max_lifespan:
+ description:
+ - The realm sso session max lifespan.
+ aliases:
+ - ssoSessionMaxLifespan
+ type: int
+ sso_session_max_lifespan_remember_me:
+ description:
+ - The realm sso session max lifespan remember me.
+ aliases:
+ - ssoSessionMaxLifespanRememberMe
+ type: int
+ supported_locales:
+ description:
+ - The realm supported locales.
+ aliases:
+ - supportedLocales
+ type: list
+ elements: str
+ user_managed_access_allowed:
+ description:
+ - The realm user managed access allowed option.
+ aliases:
+ - userManagedAccessAllowed
+ type: bool
+ verify_email:
+ description:
+ - The realm verify email option.
+ aliases:
+ - verifyEmail
+ type: bool
+ wait_increment_seconds:
+ description:
+ - The realm wait increment in seconds.
+ aliases:
+ - waitIncrementSeconds
+ type: int
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Christophe Gilles (@kris2kris)
-'''
+ - Christophe Gilles (@kris2kris)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create or update Keycloak realm (minimal example)
community.general.keycloak_realm:
auth_client_id: admin-cli
@@ -523,8 +528,7 @@ EXAMPLES = '''
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
- id: realm
- realm: realm
+ realm: unique_realm_name
state: present
- name: Delete a Keycloak realm
@@ -534,48 +538,47 @@ EXAMPLES = '''
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
- id: test
+ realm: unique_realm_name
state: absent
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Realm testrealm has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Realm testrealm has been updated"
proposed:
- description: Representation of proposed realm.
- returned: always
- type: dict
- sample: {
- id: "test"
- }
+ description: Representation of proposed realm.
+ returned: always
+ type: dict
+ sample: {"realm": "test"}
existing:
- description: Representation of existing realm (sample is truncated).
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of existing realm (sample is truncated).
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description: Representation of realm after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description: Representation of realm after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -686,6 +689,7 @@ def main():
otp_policy_type=dict(type='str', aliases=['otpPolicyType']),
otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']),
password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False),
+ organizations_enabled=dict(type='bool', aliases=['organizationsEnabled']),
permanent_lockout=dict(type='bool', aliases=['permanentLockout']),
quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']),
refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False),
@@ -713,8 +717,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'realm', 'enabled'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
@@ -772,9 +778,6 @@ def main():
# Process a creation
result['changed'] = True
- if 'id' not in desired_realm:
- module.fail_json(msg='id needs to be specified when creating a new realm')
-
if module._diff:
result['diff'] = dict(before='', after=sanitize_cr(desired_realm))
@@ -783,11 +786,11 @@ def main():
# create it
kc.create_realm(desired_realm)
- after_realm = kc.get_realm_by_id(desired_realm['id'])
+ after_realm = kc.get_realm_by_id(desired_realm['realm'])
result['end_state'] = sanitize_cr(after_realm)
- result['msg'] = 'Realm %s has been created.' % desired_realm['id']
+ result['msg'] = 'Realm %s has been created.' % desired_realm['realm']
module.exit_json(**result)
else:
@@ -821,7 +824,7 @@ def main():
result['diff'] = dict(before=before_realm_sanitized,
after=sanitize_cr(after_realm))
- result['msg'] = 'Realm %s has been updated.' % desired_realm['id']
+ result['msg'] = 'Realm %s has been updated.' % desired_realm['realm']
module.exit_json(**result)
else:
@@ -840,7 +843,7 @@ def main():
result['proposed'] = {}
result['end_state'] = {}
- result['msg'] = 'Realm %s has been deleted.' % before_realm['id']
+ result['msg'] = 'Realm %s has been deleted.' % before_realm['realm']
module.exit_json(**result)
diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py
index 5c2ebb4c9e..501ca48c21 100644
--- a/plugins/modules/keycloak_realm_info.py
+++ b/plugins/modules/keycloak_realm_info.py
@@ -8,98 +8,94 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_realm_info
-short_description: Allows obtaining Keycloak realm public information via Keycloak API
+short_description: Allows obtaining Keycloak realm public information using Keycloak API
version_added: 4.3.0
description:
- - This module allows you to get Keycloak realm public information via the Keycloak REST API.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
+ - This module allows you to get Keycloak realm public information using the Keycloak REST API.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.attributes
+ - community.general.attributes.info_module
options:
- auth_keycloak_url:
- description:
- - URL to the Keycloak instance.
- type: str
- required: true
- aliases:
- - url
- validate_certs:
- description:
- - Verify TLS certificates (do not disable this in production).
- type: bool
- default: true
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: true
- realm:
- type: str
- description:
- - They Keycloak realm ID.
- default: 'master'
+ realm:
+ type: str
+ description:
+ - The Keycloak realm ID.
+ default: 'master'
author:
- - Fynn Chen (@fynncfchen)
-'''
+ - Fynn Chen (@fynncfchen)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get a Keycloak public key
community.general.keycloak_realm_info:
realm: MyCustomRealm
auth_keycloak_url: https://auth.example.com/auth
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
realm_info:
- description:
- - Representation of the realm public information.
- returned: always
- type: dict
- contains:
- realm:
- description: Realm ID.
- type: str
- returned: always
- sample: MyRealm
- public_key:
- description: Public key of the realm.
- type: str
- returned: always
- sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
- token-service:
- description: Token endpoint URL.
- type: str
- returned: always
- sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
- account-service:
- description: Account console URL.
- type: str
- returned: always
- sample: https://auth.example.com/auth/realms/MyRealm/account
- tokens-not-before:
- description: The token not before.
- type: int
- returned: always
- sample: 0
-'''
+ description:
+ - Representation of the realm public information.
+ returned: always
+ type: dict
+ contains:
+ realm:
+ description: Realm ID.
+ type: str
+ returned: always
+ sample: MyRealm
+ public_key:
+ description: Public key of the realm.
+ type: str
+ returned: always
+ sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
+ token-service:
+ description: Token endpoint URL.
+ type: str
+ returned: always
+ sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
+ account-service:
+ description: Account console URL.
+ type: str
+ returned: always
+ sample: https://auth.example.com/auth/realms/MyRealm/account
+ tokens-not-before:
+ description: The token not before.
+ type: int
+ returned: always
+ sample: 0
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py
index edc8a6068e..dbb284ec4b 100644
--- a/plugins/modules/keycloak_realm_key.py
+++ b/plugins/modules/keycloak_realm_key.py
@@ -9,142 +9,128 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_realm_key
-short_description: Allows administration of Keycloak realm keys via Keycloak API
+short_description: Allows administration of Keycloak realm keys using Keycloak API
version_added: 7.5.0
description:
- - This module allows the administration of Keycloak realm keys via the Keycloak REST API. It
- requires access to the REST API via OpenID Connect; the user connecting and the realm being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate realm definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- Aliases are provided so camelCased versions can be used as well.
-
- - This module is unable to detect changes to the actual cryptographic key after importing it.
- However, if some other property is changed alongside the cryptographic key, then the key
- will also get changed as a side-effect, as the JSON payload needs to include the private key.
- This can be considered either a bug or a feature, as the alternative would be to always
- update the realm key whether it has changed or not.
-
- - If certificate is not explicitly provided it will be dynamically created by Keycloak.
- Therefore comparing the current state of the certificate to the desired state (which may be
- empty) is not possible.
-
+ - This module allows the administration of Keycloak realm keys using the Keycloak REST API. It requires access to the REST
+ API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default
+ Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used
+ as well.
+ - This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property
+ is changed alongside the cryptographic key, then the key also changes as a side-effect, as the JSON payload needs to include
+ the private key. This can be considered either a bug or a feature, as the alternative would be to always update the realm
+ key whether it has changed or not.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: partial
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ action_group:
+ version_added: 10.2.0
options:
- state:
+ state:
+ description:
+ - State of the keycloak realm key.
+ - On V(present), the realm key is created (or updated if it exists already).
+ - On V(absent), the realm key is removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the realm key to create.
+ type: str
+ required: true
+ force:
+ description:
+ - Enforce the state of the private key and certificate. This is not automatically the case as this module is unable
+ to determine the current state of the private key and thus cannot trigger an update based on an actual divergence.
+ That said, a private key update may happen even if force is false as a side-effect of other changes.
+ default: false
+ type: bool
+ parent_id:
+ description:
+ - The parent_id of the realm key. In practice the name of the realm.
+ type: str
+ required: true
+ provider_id:
+ description:
+ - The name of the "provider ID" for the key.
+ - The value V(rsa-enc) has been added in community.general 8.2.0.
+ choices: ['rsa', 'rsa-enc']
+ default: 'rsa'
+ type: str
+ config:
+ description:
+ - Dict specifying the key and its properties.
+ type: dict
+ suboptions:
+ active:
description:
- - State of the keycloak realm key.
- - On V(present), the realm key will be created (or updated if it exists already).
- - On V(absent), the realm key will be removed if it exists.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the realm key to create.
- type: str
- required: true
- force:
- description:
- - Enforce the state of the private key and certificate. This is not automatically the
- case as this module is unable to determine the current state of the private key and
- thus cannot trigger an update based on an actual divergence. That said, a private key
- update may happen even if force is false as a side-effect of other changes.
- default: false
+ - Whether the key is active or inactive. Not to be confused with the state of the Ansible resource managed by the
+ O(state) parameter.
+ default: true
type: bool
- parent_id:
+ enabled:
description:
- - The parent_id of the realm key. In practice the name of the realm.
- type: str
+ - Whether the key is enabled or disabled. Not to be confused with the state of the Ansible resource managed by the
+ O(state) parameter.
+ default: true
+ type: bool
+ priority:
+ description:
+ - The priority of the key.
+ type: int
required: true
- provider_id:
+ algorithm:
description:
- - The name of the "provider ID" for the key.
- - The value V(rsa-enc) has been added in community.general 8.2.0.
- choices: ['rsa', 'rsa-enc']
- default: 'rsa'
+ - Key algorithm.
+ - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5), V(RSA-OAEP), V(RSA-OAEP-256) have been
+ added in community.general 8.2.0.
+ default: RS256
+ choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256']
type: str
- config:
+ private_key:
description:
- - Dict specifying the key and its properties.
- type: dict
- suboptions:
- active:
- description:
- - Whether they key is active or inactive. Not to be confused with the state
- of the Ansible resource managed by the O(state) parameter.
- default: true
- type: bool
- enabled:
- description:
- - Whether the key is enabled or disabled. Not to be confused with the state
- of the Ansible resource managed by the O(state) parameter.
- default: true
- type: bool
- priority:
- description:
- - The priority of the key.
- type: int
- required: true
- algorithm:
- description:
- - Key algorithm.
- - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5),
- V(RSA-OAEP), V(RSA-OAEP-256) have been added in community.general 8.2.0.
- default: RS256
- choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256']
- type: str
- private_key:
- description:
- - The private key as an ASCII string. Contents of the key must match O(config.algorithm)
- and O(provider_id).
- - Please note that the module cannot detect whether the private key specified differs from the
- current state's private key. Use O(force=true) to force the module to update the private key
- if you expect it to be updated.
- required: true
- type: str
- certificate:
- description:
- - A certificate signed with the private key as an ASCII string. Contents of the
- key must match O(config.algorithm) and O(provider_id).
- - If you want Keycloak to automatically generate a certificate using your private key
- then set this to an empty string.
- required: true
- type: str
+ - The private key as an ASCII string. Contents of the key must match O(config.algorithm) and O(provider_id).
+ - Please note that the module cannot detect whether the private key specified differs from the current state's private
+ key. Use O(force=true) to force the module to update the private key if you expect it to be updated.
+ required: true
+ type: str
+ certificate:
+ description:
+ - A certificate signed with the private key as an ASCII string. Contents of the key must match O(config.algorithm)
+ and O(provider_id).
+ - If you want Keycloak to automatically generate a certificate using your private key then set this to an empty
+ string.
+ required: true
+ type: str
notes:
- - Current value of the private key cannot be fetched from Keycloak.
- Therefore comparing its desired state to the current state is not
- possible.
- - If certificate is not explicitly provided it will be dynamically created
- by Keycloak. Therefore comparing the current state of the certificate to
- the desired state (which may be empty) is not possible.
- - Due to the private key and certificate options the module is
- B(not fully idempotent). You can use O(force=true) to force the module
- to always update if you know that the private key might have changed.
-
+ - Current value of the private key cannot be fetched from Keycloak. Therefore comparing its desired state to the current
+ state is not possible.
+ - If O(config.certificate) is not explicitly provided it is dynamically created by Keycloak. Therefore comparing the current
+ state of the certificate to the desired state (which may be empty) is not possible.
+ - Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force
+ the module to ensure updating if you know that the private key might have changed.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Samuli Seppänen (@mattock)
-'''
+ - Samuli Seppänen (@mattock)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Manage Keycloak realm key (certificate autogenerated by Keycloak)
community.general.keycloak_realm_key:
name: custom
@@ -179,54 +165,63 @@ EXAMPLES = '''
active: true
priority: 120
algorithm: RS256
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
end_state:
- description: Representation of the keycloak_realm_key after module execution.
- returned: on success
- type: dict
- contains:
- id:
- description: ID of the realm key.
- type: str
- returned: when O(state=present)
- sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4
- name:
- description: Name of the realm key.
- type: str
- returned: when O(state=present)
- sample: mykey
- parentId:
- description: ID of the realm this key belongs to.
- type: str
- returned: when O(state=present)
- sample: myrealm
- providerId:
- description: The ID of the key provider.
- type: str
- returned: when O(state=present)
- sample: rsa
- providerType:
- description: The type of provider.
- type: str
- returned: when O(state=present)
- config:
- description: Realm key configuration.
- type: dict
- returned: when O(state=present)
- sample: {
- "active": ["true"],
- "algorithm": ["RS256"],
- "enabled": ["true"],
- "priority": ["140"]
- }
-'''
+ description: Representation of the keycloak_realm_key after module execution.
+ returned: on success
+ type: dict
+ contains:
+ id:
+ description: ID of the realm key.
+ type: str
+ returned: when O(state=present)
+ sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4
+ name:
+ description: Name of the realm key.
+ type: str
+ returned: when O(state=present)
+ sample: mykey
+ parentId:
+ description: ID of the realm this key belongs to.
+ type: str
+ returned: when O(state=present)
+ sample: myrealm
+ providerId:
+ description: The ID of the key provider.
+ type: str
+ returned: when O(state=present)
+ sample: rsa
+ providerType:
+ description: The type of provider.
+ type: str
+ returned: when O(state=present)
+ config:
+ description: Realm key configuration.
+ type: dict
+ returned: when O(state=present)
+ sample:
+ {
+ "active": [
+ "true"
+ ],
+ "algorithm": [
+ "RS256"
+ ],
+ "enabled": [
+ "true"
+ ],
+ "priority": [
+ "140"
+ ]
+ }
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -280,8 +275,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
# Initialize the result object. Only "changed" seems to have special
# meaning for Ansible.
diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py
index ef4048b891..8340c8f2a5 100644
--- a/plugins/modules/keycloak_realm_keys_metadata_info.py
+++ b/plugins/modules/keycloak_realm_keys_metadata_info.py
@@ -9,37 +9,39 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: keycloak_realm_keys_metadata_info
-short_description: Allows obtaining Keycloak realm keys metadata via Keycloak API
+short_description: Allows obtaining Keycloak realm keys metadata using Keycloak API
version_added: 9.3.0
description:
- - This module allows you to get Keycloak realm keys metadata via the Keycloak REST API.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html).
+ - This module allows you to get Keycloak realm keys metadata using the Keycloak REST API.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html).
+attributes:
+ action_group:
+ version_added: 10.2.0
options:
- realm:
- type: str
- description:
- - They Keycloak realm to fetch keys metadata.
- default: 'master'
+ realm:
+ type: str
+ description:
+ - The Keycloak realm to fetch keys metadata.
+ default: 'master'
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
author:
- - Thomas Bach (@thomasbach-dev)
+ - Thomas Bach (@thomasbach-dev)
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Fetch Keys metadata
community.general.keycloak_realm_keys_metadata_info:
auth_keycloak_url: https://auth.example.com/auth
@@ -62,30 +64,28 @@ EXAMPLES = """
delegate_to: localhost
"""
-RETURN = """
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
+ description: Message as to what action was taken.
+ returned: always
+ type: str
keys_metadata:
- description:
+ description:
- - Representation of the realm keys metadata (see
- U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)).
-
- returned: always
- type: dict
- contains:
- active:
- description: A mapping (that is, a dict) from key algorithms to UUIDs.
- type: dict
- returned: always
- keys:
- description: A list of dicts providing detailed information on the keys.
- type: list
- elements: dict
- returned: always
+ - Representation of the realm keys metadata (see U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)).
+ returned: always
+ type: dict
+ contains:
+ active:
+ description: A mapping (that is, a dict) from key algorithms to UUIDs.
+ type: dict
+ returned: always
+ keys:
+ description: A list of dicts providing detailed information on the keys.
+ type: list
+ elements: dict
+ returned: always
"""
from ansible.module_utils.basic import AnsibleModule
@@ -104,8 +104,9 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([["token", "auth_realm", "auth_username", "auth_password"]]),
- required_together=([["auth_realm", "auth_username", "auth_password"]]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
)
result = dict(changed=False, msg="", keys_metadata="")
diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py
index 693cf9894a..2b6b6a4eda 100644
--- a/plugins/modules/keycloak_realm_rolemapping.py
+++ b/plugins/modules/keycloak_realm_rolemapping.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_realm_rolemapping
short_description: Allows administration of Keycloak realm role mappings into groups with the Keycloak API
@@ -17,116 +16,106 @@ short_description: Allows administration of Keycloak realm role mappings into gr
version_added: 8.2.0
description:
- - This module allows you to add, remove or modify Keycloak realm role
- mappings into groups with the Keycloak REST API. It requires access to the
- REST API via OpenID Connect; the user connecting and the client being used
- must have the requisite access rights. In a default Keycloak installation,
- admin-cli and an admin user would work, as would a separate client
- definition with the scope tailored to your needs and a user having the
- expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup
- to the API to translate the name into the role ID.
-
+ - This module allows you to add, remove or modify Keycloak realm role mappings into groups with the Keycloak REST API. It
+ requires access to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite
+ access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client
+ definition with the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
+ - When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
+ to translate the name into the role ID.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the realm_rolemapping.
- - On C(present), the realm_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the realm_rolemapping will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the realm_rolemapping.
+ - On C(present), the realm_rolemapping is created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the realm_rolemapping is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- realm:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ group_name:
+ type: str
+ description:
+ - Name of the group to be mapped.
+ - This parameter is required (can be replaced by gid for fewer API calls).
+ parents:
+ type: list
+ description:
+ - List of parent groups for the group to handle sorted top to bottom.
+ - Set this if your group is a subgroup and you do not provide the GID in O(gid).
+ elements: dict
+ suboptions:
+ id:
type: str
description:
- - They Keycloak realm under which this role_representation resides.
- default: 'master'
-
- group_name:
+ - Identify parent by ID.
+ - Needs less API calls than using O(parents[].name).
+ - A deep parent chain can be started at any point when first given parent is given as ID.
+ - Note that in principle both ID and name can be specified at the same time but the current implementation
+ always uses just one of them, with ID being preferred.
+ name:
type: str
description:
- - Name of the group to be mapped.
- - This parameter is required (can be replaced by gid for less API call).
-
- parents:
- type: list
- description:
- - List of parent groups for the group to handle sorted top to bottom.
- - >-
- Set this if your group is a subgroup and you do not provide the GID in O(gid).
- elements: dict
- suboptions:
- id:
- type: str
- description:
- - Identify parent by ID.
- - Needs less API calls than using O(parents[].name).
- - A deep parent chain can be started at any point when first given parent is given as ID.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
- name:
- type: str
- description:
- - Identify parent by name.
- - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
- - When giving a parent chain with only names it must be complete up to the top.
- - Note that in principle both ID and name can be specified at the same time
- but current implementation only always use just one of them, with ID
- being preferred.
- gid:
+ - Identify parent by name.
+ - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
+ - When giving a parent chain with only names it must be complete up to the top.
+ - Note that in principle both ID and name can be specified at the same time but the current implementation
+ always uses just one of them, with ID being preferred.
+ gid:
+ type: str
+ description:
+ - ID of the group to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ roles:
+ description:
+ - Roles to be mapped to the group.
+ type: list
+ elements: dict
+ suboptions:
+ name:
type: str
description:
- - ID of the group to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- roles:
+ - Name of the role_representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
+ type: str
description:
- - Roles to be mapped to the group.
- type: list
- elements: dict
- suboptions:
- name:
- type: str
- description:
- - Name of the role_representation.
- - This parameter is required only when creating or updating the role_representation.
- id:
- type: str
- description:
- - The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but
- providing it will reduce the number of API calls required.
-
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Gaëtan Daubresse (@Gaetan2907)
- - Marius Huysamen (@mhuysamen)
- - Alexander Groß (@agross)
-'''
+ - Gaëtan Daubresse (@Gaetan2907)
+ - Marius Huysamen (@mhuysamen)
+ - Alexander Groß (@agross)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Map a client role to a group, authentication with credentials
community.general.keycloak_realm_rolemapping:
realm: MyCustomRealm
@@ -192,49 +181,49 @@ EXAMPLES = '''
- name: role_name2
id: role_id2
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Role role1 assigned to group group1."
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to group group1."
proposed:
- description: Representation of proposed client role mapping.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
+ description: Representation of proposed client role mapping.
+ returned: always
+ type: dict
+ sample: {"clientId": "test"}
existing:
- description:
- - Representation of existing client role mapping.
- - The sample is truncated.
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of existing client role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description:
- - Representation of client role mapping after module execution.
- - The sample is truncated.
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of client role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
@@ -274,8 +263,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py
index f3e01483f8..5b706354ed 100644
--- a/plugins/modules/keycloak_role.py
+++ b/plugins/modules/keycloak_role.py
@@ -8,121 +8,116 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_role
-short_description: Allows administration of Keycloak roles via Keycloak API
+short_description: Allows administration of Keycloak roles using Keycloak API
version_added: 3.4.0
description:
- - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
+ - This module allows you to add, remove or modify Keycloak roles using the Keycloak REST API. It requires access to the
+ REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In
+ a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the
+ scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the role.
- - On V(present), the role will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the role will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the role.
+ - On V(present), the role is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the role is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- name:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the role.
+ - This parameter is required.
+ description:
+ type: str
+ description:
+ - The role description.
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role resides.
+ default: 'master'
+
+ client_id:
+ type: str
+ description:
+ - If the role is a client role, the client ID under which it resides.
+ - If this parameter is absent, the role is considered a realm role.
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the role.
+ - Values may be single values (for example a string) or a list of strings.
+ composite:
+ description:
+ - If V(true), the role is a composition of other realm and/or client role.
+ default: false
+ type: bool
+ version_added: 7.1.0
+ composites:
+ description:
+ - List of roles to include to the composite realm role.
+ - If the composite role is a client role, the C(clientId) (not ID of the client) must be specified.
+ default: []
+ type: list
+ elements: dict
+ version_added: 7.1.0
+ suboptions:
+ name:
+ description:
+ - Name of the role. This can be the name of a REALM role or a client role.
type: str
required: true
+ client_id:
description:
- - Name of the role.
- - This parameter is required.
-
- description:
+ - Client ID if the role is a client role. Do not include this option for a REALM role.
+ - Use the client ID you can see in the Keycloak console, not the technical ID of the client.
type: str
+ required: false
+ aliases:
+ - clientId
+ state:
description:
- - The role description.
-
- realm:
+ - Create the composite if present, remove it if absent.
type: str
- description:
- - The Keycloak realm under which this role resides.
- default: 'master'
-
- client_id:
- type: str
- description:
- - If the role is a client role, the client id under which it resides.
- - If this parameter is absent, the role is considered a realm role.
-
- attributes:
- type: dict
- description:
- - A dict of key/value pairs to set as custom attributes for the role.
- - Values may be single values (e.g. a string) or a list of strings.
- composite:
- description:
- - If V(true), the role is a composition of other realm and/or client role.
- default: false
- type: bool
- version_added: 7.1.0
- composites:
- description:
- - List of roles to include to the composite realm role.
- - If the composite role is a client role, the C(clientId) (not ID of the client) must be specified.
- default: []
- type: list
- elements: dict
- version_added: 7.1.0
- suboptions:
- name:
- description:
- - Name of the role. This can be the name of a REALM role or a client role.
- type: str
- required: true
- client_id:
- description:
- - Client ID if the role is a client role. Do not include this option for a REALM role.
- - Use the client ID you can see in the Keycloak console, not the technical ID of the client.
- type: str
- required: false
- aliases:
- - clientId
- state:
- description:
- - Create the composite if present, remove it if absent.
- type: str
- choices:
- - present
- - absent
- default: present
+ choices:
+ - present
+ - absent
+ default: present
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Laurent Paumier (@laurpaum)
-'''
+ - Laurent Paumier (@laurpaum)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a Keycloak realm role, authentication with credentials
community.general.keycloak_role:
name: my-new-kc-role
@@ -178,60 +173,60 @@ EXAMPLES = '''
auth_password: PASSWORD
name: my-new-role
attributes:
- attrib1: value1
- attrib2: value2
- attrib3:
- - with
- - numerous
- - individual
- - list
- - items
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Role myrole has been updated"
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role myrole has been updated"
proposed:
- description: Representation of proposed role.
- returned: always
- type: dict
- sample: {
- "description": "My updated test description"
- }
+ description: Representation of proposed role.
+ returned: always
+ type: dict
+ sample: {"description": "My updated test description"}
existing:
- description: Representation of existing role.
- returned: always
- type: dict
- sample: {
- "attributes": {},
- "clientRole": true,
- "composite": false,
- "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
- "description": "My client test role",
- "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
- "name": "myrole"
+ description: Representation of existing role.
+ returned: always
+ type: dict
+ sample:
+ {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
}
end_state:
- description: Representation of role after module execution (sample is truncated).
- returned: on success
- type: dict
- sample: {
- "attributes": {},
- "clientRole": true,
- "composite": false,
- "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
- "description": "My updated client test role",
- "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
- "name": "myrole"
+ description: Representation of role after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample:
+ {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My updated client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError, is_struct_included
@@ -249,7 +244,7 @@ def main():
composites_spec = dict(
name=dict(type='str', required=True),
- client_id=dict(type='str', aliases=['clientId'], required=False),
+ client_id=dict(type='str', aliases=['clientId']),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
@@ -268,8 +263,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
@@ -287,7 +284,7 @@ def main():
state = module.params.get('state')
# attributes in Keycloak have their values returned as lists
- # via the API. attributes is a dict, so we'll transparently convert
+ # using the API. attributes is a dict, so we'll transparently convert
# the values to lists.
if module.params.get('attributes') is not None:
for key, val in module.params['attributes'].items():
diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py
index 1aeff0da5f..2b3c838483 100644
--- a/plugins/modules/keycloak_user.py
+++ b/plugins/modules/keycloak_user.py
@@ -9,222 +9,227 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_user
short_description: Create and configure a user in Keycloak
description:
- - This module creates, removes, or updates Keycloak users.
+ - This module creates, removes, or updates Keycloak users.
version_added: 7.1.0
options:
- auth_username:
- aliases: []
- realm:
+ auth_username:
+ aliases: []
+ realm:
+ description:
+ - The name of the realm in which is the client.
+ default: master
+ type: str
+ username:
+ description:
+ - Username for the user.
+ required: true
+ type: str
+ id:
+ description:
+ - ID of the user on the Keycloak server if known.
+ type: str
+ enabled:
+ description:
+ - Enabled user.
+ type: bool
+ email_verified:
+ description:
+ - Check the validity of user email.
+ default: false
+ type: bool
+ aliases:
+ - emailVerified
+ first_name:
+ description:
+ - The user's first name.
+ required: false
+ type: str
+ aliases:
+ - firstName
+ last_name:
+ description:
+ - The user's last name.
+ required: false
+ type: str
+ aliases:
+ - lastName
+ email:
+ description:
+ - User email.
+ required: false
+ type: str
+ federation_link:
+ description:
+ - Federation Link.
+ required: false
+ type: str
+ aliases:
+ - federationLink
+ service_account_client_id:
+ description:
+ - Description of the client Application.
+ required: false
+ type: str
+ aliases:
+ - serviceAccountClientId
+ client_consents:
+ description:
+ - Client Authenticator Type.
+ type: list
+ elements: dict
+ default: []
+ aliases:
+ - clientConsents
+ suboptions:
+ client_id:
description:
- - The name of the realm in which is the client.
- default: master
+ - Client ID of the client role. Not the technical ID of the client.
type: str
- username:
- description:
- - Username for the user.
required: true
- type: str
- id:
- description:
- - ID of the user on the Keycloak server if known.
- type: str
- enabled:
- description:
- - Enabled user.
- type: bool
- email_verified:
- description:
- - Check the validity of user email.
- default: false
- type: bool
aliases:
- - emailVerified
- first_name:
+ - clientId
+ roles:
description:
- - The user's first name.
- required: false
- type: str
- aliases:
- - firstName
- last_name:
- description:
- - The user's last name.
- required: false
- type: str
- aliases:
- - lastName
- email:
- description:
- - User email.
- required: false
- type: str
- federation_link:
- description:
- - Federation Link.
- required: false
- type: str
- aliases:
- - federationLink
- service_account_client_id:
- description:
- - Description of the client Application.
- required: false
- type: str
- aliases:
- - serviceAccountClientId
- client_consents:
- description:
- - Client Authenticator Type.
- type: list
- elements: dict
- default: []
- aliases:
- - clientConsents
- suboptions:
- client_id:
- description:
- - Client ID of the client role. Not the technical ID of the client.
- type: str
- required: true
- aliases:
- - clientId
- roles:
- description:
- - List of client roles to assign to the user.
- type: list
- required: true
- elements: str
- groups:
- description:
- - List of groups for the user.
- type: list
- elements: dict
- default: []
- suboptions:
- name:
- description:
- - Name of the group.
- type: str
- state:
- description:
- - Control whether the user must be member of this group or not.
- choices: [ "present", "absent" ]
- default: present
- type: str
- credentials:
- description:
- - User credentials.
- default: []
- type: list
- elements: dict
- suboptions:
- type:
- description:
- - Credential type.
- type: str
- required: true
- value:
- description:
- - Value of the credential.
- type: str
- required: true
- temporary:
- description:
- - If V(true), the users are required to reset their credentials at next login.
- type: bool
- default: false
- required_actions:
- description:
- - RequiredActions user Auth.
- default: []
+ - List of client roles to assign to the user.
type: list
+ required: true
elements: str
- aliases:
- - requiredActions
- federated_identities:
+ groups:
+ description:
+ - List of groups for the user.
+ - Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering). The path syntax
+ allows you to reference subgroups, which is not possible otherwise.
+ - Using the path is possible since community.general 10.6.0.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ name:
description:
- - List of IDPs of user.
- default: []
- type: list
- elements: str
- aliases:
- - federatedIdentities
- attributes:
- description:
- - List of user attributes.
- required: false
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - Name of the attribute.
- type: str
- values:
- description:
- - Values for the attribute as list.
- type: list
- elements: str
- state:
- description:
- - Control whether the attribute must exists or not.
- choices: [ "present", "absent" ]
- default: present
- type: str
- access:
- description:
- - list user access.
- required: false
- type: dict
- disableable_credential_types:
- description:
- - list user Credential Type.
- default: []
- type: list
- elements: str
- aliases:
- - disableableCredentialTypes
- origin:
- description:
- - user origin.
- required: false
+ - Name of the group.
type: str
- self:
+ state:
description:
- - user self administration.
- required: false
- type: str
- state:
- description:
- - Control whether the user should exists or not.
- choices: [ "present", "absent" ]
+ - Control whether the user must be member of this group or not.
+ choices: ["present", "absent"]
default: present
type: str
- force:
+ credentials:
+ description:
+ - User credentials.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ type:
description:
- - If V(true), allows to remove user and recreate it.
+ - Credential type.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the credential.
+ type: str
+ required: true
+ temporary:
+ description:
+ - If V(true), the users are required to reset their credentials at next login.
type: bool
default: false
+ required_actions:
+ description:
+ - RequiredActions user Auth.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - requiredActions
+ federated_identities:
+ description:
+ - List of IDPs of user.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - federatedIdentities
+ attributes:
+ description:
+ - List of user attributes.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the attribute.
+ type: str
+ values:
+ description:
+ - Values for the attribute as list.
+ type: list
+ elements: str
+ state:
+ description:
+ - Control whether the attribute must exist or not.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ access:
+ description:
+ - List user access.
+ required: false
+ type: dict
+ disableable_credential_types:
+ description:
+ - List user Credential Type.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - disableableCredentialTypes
+ origin:
+ description:
+ - User origin.
+ required: false
+ type: str
+ self:
+ description:
+ - User self administration.
+ required: false
+ type: str
+ state:
+ description:
+ - Control whether the user should exist or not.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ force:
+ description:
+ - If V(true), allows the user to be removed and recreated.
+ type: bool
+ default: false
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
notes:
- - The module does not modify the user ID of an existing user.
+ - The module does not modify the user ID of an existing user.
author:
- - Philippe Gauthier (@elfelip)
-'''
+ - Philippe Gauthier (@elfelip)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a user user1
community.general.keycloak_user:
auth_keycloak_url: http://localhost:8080/auth
@@ -238,21 +243,21 @@ EXAMPLES = '''
enabled: true
emailVerified: false
credentials:
- - type: password
- value: password
- temporary: false
+ - type: password
+ value: password
+ temporary: false
attributes:
- - name: attr1
- values:
- - value1
- state: present
- - name: attr2
- values:
- - value2
- state: absent
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
groups:
- - name: group1
- state: present
+ - name: group1
+ state: present
state: present
- name: Re-create a User
@@ -268,21 +273,21 @@ EXAMPLES = '''
enabled: true
emailVerified: false
credentials:
- - type: password
- value: password
- temporary: false
+ - type: password
+ value: password
+ temporary: false
attributes:
- - name: attr1
- values:
- - value1
- state: present
- - name: attr2
- values:
- - value2
- state: absent
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
groups:
- - name: group1
- state: present
+ - name: group1
+ state: present
state: present
- name: Re-create a User
@@ -298,21 +303,21 @@ EXAMPLES = '''
enabled: true
emailVerified: false
credentials:
- - type: password
- value: password
- temporary: false
+ - type: password
+ value: password
+ temporary: false
attributes:
- - name: attr1
- values:
- - value1
- state: present
- - name: attr2
- values:
- - value2
- state: absent
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
groups:
- - name: group1
- state: present
+ - name: group1
+ state: present
state: present
force: true
@@ -324,14 +329,9 @@ EXAMPLES = '''
realm: master
username: user1
state: absent
-'''
+"""
-RETURN = '''
-msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: User f18c709c-03d6-11ee-970b-c74bf2721112 created
+RETURN = r"""
proposed:
description: Representation of the proposed user.
returned: on success
@@ -341,14 +341,11 @@ existing:
returned: on success
type: dict
end_state:
- description: Representation of the user after module execution
+ description: Representation of the user after module execution.
returned: on success
type: dict
-changed:
- description: Return V(true) if the operation changed the user on the keycloak server, V(false) otherwise.
- returned: always
- type: bool
-'''
+"""
+
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError, is_struct_included
from ansible.module_utils.basic import AnsibleModule
@@ -404,8 +401,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py
index 215aa7f4ca..3290ab8dd9 100644
--- a/plugins/modules/keycloak_user_federation.py
+++ b/plugins/modules/keycloak_user_federation.py
@@ -8,740 +8,717 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_user_federation
-short_description: Allows administration of Keycloak user federations via Keycloak API
+short_description: Allows administration of Keycloak user federations using Keycloak API
version_added: 3.7.0
description:
- - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
-
+ - This module allows you to add, remove or modify Keycloak user federations using the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the user federation.
- - On V(present), the user federation will be created if it does not yet exist, or updated with
- the parameters you provide.
- - On V(absent), the user federation will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the user federation.
+ - On V(present), the user federation is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the user federation is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- realm:
- description:
- - The Keycloak realm under which this user federation resides.
- default: 'master'
- type: str
+ realm:
+ description:
+ - The Keycloak realm under which this user federation resides.
+ default: 'master'
+ type: str
- id:
- description:
- - The unique ID for this user federation. If left empty, the user federation will be searched
- by its O(name).
- type: str
+ id:
+ description:
+ - The unique ID for this user federation. If left empty, the user federation is searched by its O(name).
+ type: str
- name:
- description:
- - Display name of provider when linked in admin console.
- type: str
+ name:
+ description:
+ - Display name of provider when linked in admin console.
+ type: str
- provider_id:
- description:
- - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd).
- Custom user storage providers can also be used.
- aliases:
- - providerId
- type: str
+ provider_id:
+ description:
+ - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd). Custom user storage providers
+ can also be used.
+ aliases:
+ - providerId
+ type: str
- provider_type:
- description:
- - Component type for user federation (only supported value is V(org.keycloak.storage.UserStorageProvider)).
- aliases:
- - providerType
- default: org.keycloak.storage.UserStorageProvider
- type: str
+ provider_type:
+ description:
+ - Component type for user federation (only supported value is V(org.keycloak.storage.UserStorageProvider)).
+ aliases:
+ - providerType
+ default: org.keycloak.storage.UserStorageProvider
+ type: str
- parent_id:
- description:
- - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
- aliases:
- - parentId
- type: str
+ parent_id:
+ description:
+ - Unique ID for the parent of this user federation. Realm ID is automatically used if left blank.
+ aliases:
+ - parentId
+ type: str
- remove_unspecified_mappers:
+ remove_unspecified_mappers:
+ description:
+ - Remove mappers that are not specified in the configuration for this federation.
+ - Set to V(false) to keep mappers that are not listed in O(mappers).
+ type: bool
+ default: true
+ version_added: 9.4.0
+
+ bind_credential_update_mode:
+ description:
+ - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. Comparing the redacted
+ value with the desired value always evaluates to not equal. This means the before and desired states are never equal
+ if the parameter is set.
+ - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the
+ redacted value returned by Keycloak the module always detects a change and makes an update if a O(config.bindCredential)
+ value is set.
+ - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the desired state.
+ The value of O(config.bindCredential) is only updated if there are other changes to the user federation that require
+ an update.
+ type: str
+ default: always
+ choices:
+ - always
+ - only_indirect
+ version_added: 9.5.0
+
+ config:
+ description:
+ - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id).
+ Examples are given below for V(ldap), V(kerberos) and V(sssd). It is easiest to obtain valid config values by dumping
+ an already-existing user federation configuration through check-mode in the RV(existing) field.
+ - The value V(sssd) has been supported since community.general 4.2.0.
+ type: dict
+ suboptions:
+ enabled:
description:
- - Remove mappers that are not specified in the configuration for this federation.
- - Set to V(false) to keep mappers that are not listed in O(mappers).
- type: bool
+ - Enable/disable this user federation.
default: true
- version_added: 9.4.0
+ type: bool
- bind_credential_update_mode:
+ priority:
description:
- - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses.
- Comparing the redacted value with the desired value always evaluates to not equal. This means
- the before and desired states are never equal if the parameter is set.
- - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state.
- Because of the redacted value returned by Keycloak the module will always detect a change
- and make an update if a O(config.bindCredential) value is set.
- - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the
- desired state. The value of O(config.bindCredential) will only be updated if there are other changes
- to the user federation that require an update.
+ - Priority of provider when doing a user lookup. Lowest first.
+ default: 0
+ type: int
+
+ importEnabled:
+ description:
+ - If V(true), LDAP users are imported into Keycloak DB and synced by the configured sync policies.
+ default: true
+ type: bool
+
+ editMode:
+ description:
+ - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data is synced back to LDAP on demand. V(UNSYNCED) means
+ user data is imported, but not synced back to LDAP.
type: str
- default: always
choices:
- - always
- - only_indirect
+ - READ_ONLY
+ - WRITABLE
+ - UNSYNCED
+
+ syncRegistrations:
+ description:
+ - Should newly created users be created within LDAP store? Priority affects which provider is chosen to sync the
+ new user.
+ default: false
+ type: bool
+
+ vendor:
+ description:
+ - LDAP vendor (provider).
+ - Use short name. For instance, write V(rhds) for "Red Hat Directory Server".
+ type: str
+
+ usernameLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server vendors it can be V(uid). For
+ Active directory it can be V(sAMAccountName) or V(cn). The attribute should be filled for all LDAP user records
+ you want to import from LDAP to Keycloak.
+ type: str
+
+ rdnLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. Usually it is the same as Username
+ LDAP attribute, however it is not required. For example for Active directory, it is common to use V(cn) as RDN
+ attribute when username attribute might be V(sAMAccountName).
+ type: str
+
+ uuidLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects in LDAP. For many LDAP server
+ vendors, it is V(entryUUID); however some are different. For example for Active directory it should be V(objectGUID).
+ If your LDAP server does not support the notion of UUID, you can use any other attribute that is supposed to be
+ unique among LDAP users in tree.
+ type: str
+
+ userObjectClasses:
+ description:
+ - All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson).
+ Newly created Keycloak users are written to LDAP with all those object classes and existing LDAP user records
+ are found just if they contain all those object classes.
+ type: str
+
+ connectionUrl:
+ description:
+ - Connection URL to your LDAP server.
+ type: str
+
+ usersDn:
+ description:
+ - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
+ type: str
+
+ customUserSearchFilter:
+ description:
+ - Additional LDAP Filter for filtering searched users. Leave this empty if you do not need an additional filter.
+ type: str
+
+ searchScope:
+ description:
+ - For one level, the search applies only for users in the DNs specified by User DNs. For subtree, the search applies
+ to the whole subtree. See LDAP documentation for more details.
+ default: '1'
+ type: str
+ choices:
+ - '1'
+ - '2'
+
+ authType:
+ description:
+ - Type of the Authentication method used during LDAP Bind operation. It is used in most of the requests sent to
+ the LDAP server.
+ default: 'none'
+ type: str
+ choices:
+ - none
+ - simple
+
+ bindDn:
+ description:
+ - DN of LDAP user which is used by Keycloak to access LDAP server.
+ type: str
+
+ bindCredential:
+ description:
+ - Password of LDAP admin.
+ type: str
+
+ startTls:
+ description:
+ - Encrypts the connection to LDAP using STARTTLS, which disables connection pooling.
+ default: false
+ type: bool
+
+ usePasswordModifyExtendedOp:
+ description:
+ - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify extended operation usually requires
+ that LDAP user already has password in the LDAP server. So when this is used with 'Sync Registrations', it can
+ be good to add also 'Hardcoded LDAP attribute mapper' with randomly generated initial password.
+ default: false
+ type: bool
+
+ validatePasswordPolicy:
+ description:
+ - Determines if Keycloak should validate the password with the realm password policy before updating it.
+ default: false
+ type: bool
+
+ trustEmail:
+ description:
+ - If enabled, email provided by this provider is not verified even if verification is enabled for the realm.
+ default: false
+ type: bool
+
+ useTruststoreSpi:
+ description:
+ - Specifies whether LDAP connection uses the truststore SPI with the truststore configured in standalone.xml/domain.xml.
+ V(always) means that it always uses it. V(never) means that it does not use it. V(ldapsOnly) means that it uses
+ it if your connection URL uses ldaps.
+ - Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or certificate specified by
+ C(javax.net.ssl.trustStore) property is used.
+ default: ldapsOnly
+ type: str
+ choices:
+ - always
+ - ldapsOnly
+ - never
+
+ connectionTimeout:
+ description:
+ - LDAP Connection Timeout in milliseconds.
+ type: int
+
+ readTimeout:
+ description:
+ - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations.
+ type: int
+
+ pagination:
+ description:
+ - Does the LDAP server support pagination.
+ default: true
+ type: bool
+
+ connectionPooling:
+ description:
+ - Determines if Keycloak should use connection pooling for accessing LDAP server.
+ default: true
+ type: bool
+
+ connectionPoolingAuthentication:
+ description:
+ - A list of space-separated authentication types of connections that may be pooled.
+ type: str
+ choices:
+ - none
+ - simple
+ - DIGEST-MD5
+
+ connectionPoolingDebug:
+ description:
+ - A string that indicates the level of debug output to produce. Example valid values are V(fine) (trace connection
+ creation and removal) and V(all) (all debugging information).
+ type: str
+
+ connectionPoolingInitSize:
+ description:
+ - The number of connections per connection identity to create when initially creating a connection for the identity.
+ type: int
+
+ connectionPoolingMaxSize:
+ description:
+ - The maximum number of connections per connection identity that can be maintained concurrently.
+ type: int
+
+ connectionPoolingPrefSize:
+ description:
+ - The preferred number of connections per connection identity that should be maintained concurrently.
+ type: int
+
+ connectionPoolingProtocol:
+ description:
+ - A list of space-separated protocol types of connections that may be pooled. Valid types are V(plain) and V(ssl).
+ type: str
+
+ connectionPoolingTimeout:
+ description:
+ - The number of milliseconds that an idle connection may remain in the pool without being closed and removed from
+ the pool.
+ type: int
+
+ allowKerberosAuthentication:
+ description:
+ - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users is
+ provisioned from this LDAP server.
+ default: false
+ type: bool
+
+ kerberosRealm:
+ description:
+ - Name of the Kerberos realm.
+ type: str
+
+ krbPrincipalAttribute:
+ description:
+ - Name of the LDAP attribute, which refers to Kerberos principal. This is used to lookup appropriate LDAP user after
+ successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user is looked up based on
+ LDAP username corresponding to the first part of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
+ it assumes that LDAP username is V(john).
+ type: str
+ version_added: 8.1.0
+
+ serverPrincipal:
+ description:
+ - Full name of server principal for HTTP service including server and domain name. For example V(HTTP/host.foo.org@FOO.ORG).
+ Use V(*) to accept any service principal in the KeyTab file.
+ type: str
+
+ keyTab:
+ description:
+ - Location of Kerberos KeyTab file containing the credentials of server principal. For example V(/etc/krb5.keytab).
+ type: str
+
+ debug:
+ description:
+ - Enable/disable debug logging to standard output for Krb5LoginModule.
+ type: bool
+
+ useKerberosForPasswordAuthentication:
+ description:
+ - Use Kerberos login module for authenticate username/password against Kerberos server instead of authenticating
+ against LDAP server with Directory Service API.
+ default: false
+ type: bool
+
+ allowPasswordAuthentication:
+ description:
+ - Enable/disable possibility of username/password authentication against Kerberos database.
+ type: bool
+
+ batchSizeForSync:
+ description:
+ - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
+ default: 1000
+ type: int
+
+ fullSyncPeriod:
+ description:
+ - Period for full synchronization in seconds.
+ default: -1
+ type: int
+
+ changedSyncPeriod:
+ description:
+ - Period for synchronization of changed or newly created LDAP users in seconds.
+ default: -1
+ type: int
+
+ updateProfileFirstLogin:
+ description:
+ - Update profile on first login.
+ type: bool
+
+ cachePolicy:
+ description:
+ - Cache Policy for this storage provider.
+ type: str
+ default: 'DEFAULT'
+ choices:
+ - DEFAULT
+ - EVICT_DAILY
+ - EVICT_WEEKLY
+ - MAX_LIFESPAN
+ - NO_CACHE
+
+ evictionDay:
+ description:
+ - Day of the week the entry is set to become invalid on.
+ type: str
+
+ evictionHour:
+ description:
+ - Hour of day the entry is set to become invalid on.
+ type: str
+
+ evictionMinute:
+ description:
+ - Minute of day the entry is set to become invalid on.
+ type: str
+
+ maxLifespan:
+ description:
+ - Max lifespan of cache entry in milliseconds.
+ type: int
+
+ referral:
+ description:
+ - Specifies if LDAP referrals should be followed or ignored. Please note that enabling referrals can slow down authentication
+ as it allows the LDAP server to decide which other LDAP servers to use. This could potentially include untrusted
+ servers.
+ type: str
+ choices:
+ - ignore
+ - follow
version_added: 9.5.0
- config:
+ mappers:
+ description:
+ - A list of dicts defining mappers associated with this Identity Provider.
+ type: list
+ elements: dict
+ suboptions:
+ id:
description:
- - Dict specifying the configuration options for the provider; the contents differ depending on
- the value of O(provider_id). Examples are given below for V(ldap), V(kerberos) and V(sssd).
- It is easiest to obtain valid config values by dumping an already-existing user federation
- configuration through check-mode in the RV(existing) field.
- - The value V(sssd) has been supported since community.general 4.2.0.
+ - Unique ID of this mapper.
+ type: str
+
+ name:
+ description:
+ - Name of the mapper. If no ID is given, the mapper is searched by name.
+ type: str
+
+ parentId:
+ description:
+ - Unique ID for the parent of this mapper. ID of the user federation is automatically used if left blank.
+ type: str
+
+ providerId:
+ description:
+ - The mapper type for this mapper (for instance V(user-attribute-ldap-mapper)).
+ type: str
+
+ providerType:
+ description:
+ - Component type for this mapper.
+ type: str
+ default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
+
+ config:
+ description:
+ - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
type: dict
- suboptions:
- enabled:
- description:
- - Enable/disable this user federation.
- default: true
- type: bool
-
- priority:
- description:
- - Priority of provider when doing a user lookup. Lowest first.
- default: 0
- type: int
-
- importEnabled:
- description:
- - If V(true), LDAP users will be imported into Keycloak DB and synced by the configured
- sync policies.
- default: true
- type: bool
-
- editMode:
- description:
- - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data will be synced back to LDAP
- on demand. V(UNSYNCED) means user data will be imported, but not synced back to LDAP.
- type: str
- choices:
- - READ_ONLY
- - WRITABLE
- - UNSYNCED
-
- syncRegistrations:
- description:
- - Should newly created users be created within LDAP store? Priority effects which
- provider is chosen to sync the new user.
- default: false
- type: bool
-
- vendor:
- description:
- - LDAP vendor (provider).
- - Use short name. For instance, write V(rhds) for "Red Hat Directory Server".
- type: str
-
- usernameLDAPAttribute:
- description:
- - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
- vendors it can be V(uid). For Active directory it can be V(sAMAccountName) or V(cn).
- The attribute should be filled for all LDAP user records you want to import from
- LDAP to Keycloak.
- type: str
-
- rdnLDAPAttribute:
- description:
- - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
- Usually it's the same as Username LDAP attribute, however it is not required. For
- example for Active directory, it is common to use V(cn) as RDN attribute when
- username attribute might be V(sAMAccountName).
- type: str
-
- uuidLDAPAttribute:
- description:
- - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
- in LDAP. For many LDAP server vendors, it is V(entryUUID); however some are different.
- For example for Active directory it should be V(objectGUID). If your LDAP server does
- not support the notion of UUID, you can use any other attribute that is supposed to
- be unique among LDAP users in tree.
- type: str
-
- userObjectClasses:
- description:
- - All values of LDAP objectClass attribute for users in LDAP divided by comma.
- For example V(inetOrgPerson, organizationalPerson). Newly created Keycloak users
- will be written to LDAP with all those object classes and existing LDAP user records
- are found just if they contain all those object classes.
- type: str
-
- connectionUrl:
- description:
- - Connection URL to your LDAP server.
- type: str
-
- usersDn:
- description:
- - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
- type: str
-
- customUserSearchFilter:
- description:
- - Additional LDAP Filter for filtering searched users. Leave this empty if you don't
- need additional filter.
- type: str
-
- searchScope:
- description:
- - For one level, the search applies only for users in the DNs specified by User DNs.
- For subtree, the search applies to the whole subtree. See LDAP documentation for
- more details.
- default: '1'
- type: str
- choices:
- - '1'
- - '2'
-
- authType:
- description:
- - Type of the Authentication method used during LDAP Bind operation. It is used in
- most of the requests sent to the LDAP server.
- default: 'none'
- type: str
- choices:
- - none
- - simple
-
- bindDn:
- description:
- - DN of LDAP user which will be used by Keycloak to access LDAP server.
- type: str
-
- bindCredential:
- description:
- - Password of LDAP admin.
- type: str
-
- startTls:
- description:
- - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
- default: false
- type: bool
-
- usePasswordModifyExtendedOp:
- description:
- - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify
- extended operation usually requires that LDAP user already has password in the LDAP
- server. So when this is used with 'Sync Registrations', it can be good to add also
- 'Hardcoded LDAP attribute mapper' with randomly generated initial password.
- default: false
- type: bool
-
- validatePasswordPolicy:
- description:
- - Determines if Keycloak should validate the password with the realm password policy
- before updating it.
- default: false
- type: bool
-
- trustEmail:
- description:
- - If enabled, email provided by this provider is not verified even if verification is
- enabled for the realm.
- default: false
- type: bool
-
- useTruststoreSpi:
- description:
- - Specifies whether LDAP connection will use the truststore SPI with the truststore
- configured in standalone.xml/domain.xml. V(always) means that it will always use it.
- V(never) means that it will not use it. V(ldapsOnly) means that it will use if
- your connection URL use ldaps. Note even if standalone.xml/domain.xml is not
- configured, the default Java cacerts or certificate specified by
- C(javax.net.ssl.trustStore) property will be used.
- default: ldapsOnly
- type: str
- choices:
- - always
- - ldapsOnly
- - never
-
- connectionTimeout:
- description:
- - LDAP Connection Timeout in milliseconds.
- type: int
-
- readTimeout:
- description:
- - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations.
- type: int
-
- pagination:
- description:
- - Does the LDAP server support pagination.
- default: true
- type: bool
-
- connectionPooling:
- description:
- - Determines if Keycloak should use connection pooling for accessing LDAP server.
- default: true
- type: bool
-
- connectionPoolingAuthentication:
- description:
- - A list of space-separated authentication types of connections that may be pooled.
- type: str
- choices:
- - none
- - simple
- - DIGEST-MD5
-
- connectionPoolingDebug:
- description:
- - A string that indicates the level of debug output to produce. Example valid values are
- V(fine) (trace connection creation and removal) and V(all) (all debugging information).
- type: str
-
- connectionPoolingInitSize:
- description:
- - The number of connections per connection identity to create when initially creating a
- connection for the identity.
- type: int
-
- connectionPoolingMaxSize:
- description:
- - The maximum number of connections per connection identity that can be maintained
- concurrently.
- type: int
-
- connectionPoolingPrefSize:
- description:
- - The preferred number of connections per connection identity that should be maintained
- concurrently.
- type: int
-
- connectionPoolingProtocol:
- description:
- - A list of space-separated protocol types of connections that may be pooled.
- Valid types are V(plain) and V(ssl).
- type: str
-
- connectionPoolingTimeout:
- description:
- - The number of milliseconds that an idle connection may remain in the pool without
- being closed and removed from the pool.
- type: int
-
- allowKerberosAuthentication:
- description:
- - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data
- about authenticated users will be provisioned from this LDAP server.
- default: false
- type: bool
-
- kerberosRealm:
- description:
- - Name of kerberos realm.
- type: str
-
- krbPrincipalAttribute:
- description:
- - Name of the LDAP attribute, which refers to Kerberos principal.
- This is used to lookup appropriate LDAP user after successful Kerberos/SPNEGO authentication in Keycloak.
- When this is empty, the LDAP user will be looked based on LDAP username corresponding
- to the first part of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
- it will assume that LDAP username is V(john).
- type: str
- version_added: 8.1.0
-
- serverPrincipal:
- description:
- - Full name of server principal for HTTP service including server and domain name. For
- example V(HTTP/host.foo.org@FOO.ORG). Use V(*) to accept any service principal in the
- KeyTab file.
- type: str
-
- keyTab:
- description:
- - Location of Kerberos KeyTab file containing the credentials of server principal. For
- example V(/etc/krb5.keytab).
- type: str
-
- debug:
- description:
- - Enable/disable debug logging to standard output for Krb5LoginModule.
- type: bool
-
- useKerberosForPasswordAuthentication:
- description:
- - Use Kerberos login module for authenticate username/password against Kerberos server
- instead of authenticating against LDAP server with Directory Service API.
- default: false
- type: bool
-
- allowPasswordAuthentication:
- description:
- - Enable/disable possibility of username/password authentication against Kerberos database.
- type: bool
-
- batchSizeForSync:
- description:
- - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
- default: 1000
- type: int
-
- fullSyncPeriod:
- description:
- - Period for full synchronization in seconds.
- default: -1
- type: int
-
- changedSyncPeriod:
- description:
- - Period for synchronization of changed or newly created LDAP users in seconds.
- default: -1
- type: int
-
- updateProfileFirstLogin:
- description:
- - Update profile on first login.
- type: bool
-
- cachePolicy:
- description:
- - Cache Policy for this storage provider.
- type: str
- default: 'DEFAULT'
- choices:
- - DEFAULT
- - EVICT_DAILY
- - EVICT_WEEKLY
- - MAX_LIFESPAN
- - NO_CACHE
-
- evictionDay:
- description:
- - Day of the week the entry will become invalid on.
- type: str
-
- evictionHour:
- description:
- - Hour of day the entry will become invalid on.
- type: str
-
- evictionMinute:
- description:
- - Minute of day the entry will become invalid on.
- type: str
-
- maxLifespan:
- description:
- - Max lifespan of cache entry in milliseconds.
- type: int
-
- referral:
- description:
- - Specifies if LDAP referrals should be followed or ignored. Please note that enabling
- referrals can slow down authentication as it allows the LDAP server to decide which other
- LDAP servers to use. This could potentially include untrusted servers.
- type: str
- choices:
- - ignore
- - follow
- version_added: 9.5.0
-
- mappers:
- description:
- - A list of dicts defining mappers associated with this Identity Provider.
- type: list
- elements: dict
- suboptions:
- id:
- description:
- - Unique ID of this mapper.
- type: str
-
- name:
- description:
- - Name of the mapper. If no ID is given, the mapper will be searched by name.
- type: str
-
- parentId:
- description:
- - Unique ID for the parent of this mapper. ID of the user federation will automatically
- be used if left blank.
- type: str
-
- providerId:
- description:
- - The mapper type for this mapper (for instance V(user-attribute-ldap-mapper)).
- type: str
-
- providerType:
- description:
- - Component type for this mapper.
- type: str
- default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
-
- config:
- description:
- - Dict specifying the configuration options for the mapper; the contents differ
- depending on the value of I(identityProviderMapper).
- # TODO: what is identityProviderMapper above???
- type: dict
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Laurent Paumier (@laurpaum)
-'''
+ - Laurent Paumier (@laurpaum)
+"""
-EXAMPLES = '''
- - name: Create LDAP user federation
- community.general.keycloak_user_federation:
- auth_keycloak_url: https://keycloak.example.com/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: my-realm
- name: my-ldap
- state: present
- provider_id: ldap
- provider_type: org.keycloak.storage.UserStorageProvider
- config:
- priority: 0
- enabled: true
- cachePolicy: DEFAULT
- batchSizeForSync: 1000
- editMode: READ_ONLY
- importEnabled: true
- syncRegistrations: false
- vendor: other
- usernameLDAPAttribute: uid
- rdnLDAPAttribute: uid
- uuidLDAPAttribute: entryUUID
- userObjectClasses: inetOrgPerson, organizationalPerson
- connectionUrl: ldaps://ldap.example.com:636
- usersDn: ou=Users,dc=example,dc=com
- authType: simple
- bindDn: cn=directory reader
- bindCredential: password
- searchScope: 1
- validatePasswordPolicy: false
- trustEmail: false
- useTruststoreSpi: ldapsOnly
- connectionPooling: true
- pagination: true
- allowKerberosAuthentication: false
- debug: false
- useKerberosForPasswordAuthentication: false
- mappers:
- - name: "full name"
- providerId: "full-name-ldap-mapper"
- providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
- config:
- ldap.full.name.attribute: cn
- read.only: true
- write.only: false
+EXAMPLES = r"""
+- name: Create LDAP user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-ldap
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: inetOrgPerson, organizationalPerson
+ connectionUrl: ldaps://ldap.example.com:636
+ usersDn: ou=Users,dc=example,dc=com
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: password
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: ldapsOnly
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ debug: false
+ useKerberosForPasswordAuthentication: false
+ mappers:
+ - name: "full name"
+ providerId: "full-name-ldap-mapper"
+ providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ config:
+ ldap.full.name.attribute: cn
+ read.only: true
+ write.only: false
- - name: Create Kerberos user federation
- community.general.keycloak_user_federation:
- auth_keycloak_url: https://keycloak.example.com/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: my-realm
- name: my-kerberos
- state: present
- provider_id: kerberos
- provider_type: org.keycloak.storage.UserStorageProvider
- config:
- priority: 0
- enabled: true
- cachePolicy: DEFAULT
- kerberosRealm: EXAMPLE.COM
- serverPrincipal: HTTP/host.example.com@EXAMPLE.COM
- keyTab: keytab
- allowPasswordAuthentication: false
- updateProfileFirstLogin: false
+- name: Create Kerberos user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-kerberos
+ state: present
+ provider_id: kerberos
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
+ kerberosRealm: EXAMPLE.COM
+ serverPrincipal: HTTP/host.example.com@EXAMPLE.COM
+ keyTab: keytab
+ allowPasswordAuthentication: false
+ updateProfileFirstLogin: false
- - name: Create sssd user federation
- community.general.keycloak_user_federation:
- auth_keycloak_url: https://keycloak.example.com/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: my-realm
- name: my-sssd
- state: present
- provider_id: sssd
- provider_type: org.keycloak.storage.UserStorageProvider
- config:
- priority: 0
- enabled: true
- cachePolicy: DEFAULT
+- name: Create sssd user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-sssd
+ state: present
+ provider_id: sssd
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
- - name: Delete user federation
- community.general.keycloak_user_federation:
- auth_keycloak_url: https://keycloak.example.com/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: my-realm
- name: my-federation
- state: absent
+- name: Delete user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-federation
+ state: absent
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."
proposed:
- description: Representation of proposed user federation.
- returned: always
- type: dict
- sample: {
- "config": {
- "allowKerberosAuthentication": "false",
- "authType": "simple",
- "batchSizeForSync": "1000",
- "bindCredential": "**********",
- "bindDn": "cn=directory reader",
- "cachePolicy": "DEFAULT",
- "connectionPooling": "true",
- "connectionUrl": "ldaps://ldap.example.com:636",
- "debug": "false",
- "editMode": "READ_ONLY",
- "enabled": "true",
- "importEnabled": "true",
- "pagination": "true",
- "priority": "0",
- "rdnLDAPAttribute": "uid",
- "searchScope": "1",
- "syncRegistrations": "false",
- "trustEmail": "false",
- "useKerberosForPasswordAuthentication": "false",
- "useTruststoreSpi": "ldapsOnly",
- "userObjectClasses": "inetOrgPerson, organizationalPerson",
- "usernameLDAPAttribute": "uid",
- "usersDn": "ou=Users,dc=example,dc=com",
- "uuidLDAPAttribute": "entryUUID",
- "validatePasswordPolicy": "false",
- "vendor": "other"
- },
- "name": "ldap",
- "providerId": "ldap",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of proposed user federation.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "name": "ldap",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
existing:
- description: Representation of existing user federation.
- returned: always
- type: dict
- sample: {
- "config": {
- "allowKerberosAuthentication": "false",
- "authType": "simple",
- "batchSizeForSync": "1000",
- "bindCredential": "**********",
- "bindDn": "cn=directory reader",
- "cachePolicy": "DEFAULT",
- "changedSyncPeriod": "-1",
- "connectionPooling": "true",
- "connectionUrl": "ldaps://ldap.example.com:636",
- "debug": "false",
- "editMode": "READ_ONLY",
- "enabled": "true",
- "fullSyncPeriod": "-1",
- "importEnabled": "true",
- "pagination": "true",
- "priority": "0",
- "rdnLDAPAttribute": "uid",
- "searchScope": "1",
- "syncRegistrations": "false",
- "trustEmail": "false",
- "useKerberosForPasswordAuthentication": "false",
- "useTruststoreSpi": "ldapsOnly",
- "userObjectClasses": "inetOrgPerson, organizationalPerson",
- "usernameLDAPAttribute": "uid",
- "usersDn": "ou=Users,dc=example,dc=com",
- "uuidLDAPAttribute": "entryUUID",
- "validatePasswordPolicy": "false",
- "vendor": "other"
- },
- "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
- "mappers": [
- {
- "config": {
- "always.read.value.from.ldap": "false",
- "is.mandatory.in.ldap": "false",
- "ldap.attribute": "mail",
- "read.only": "true",
- "user.model.attribute": "email"
- },
- "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
- "name": "email",
- "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
- "providerId": "user-attribute-ldap-mapper",
- "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
- }
- ],
- "name": "myfed",
- "parentId": "myrealm",
- "providerId": "ldap",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of existing user federation.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "changedSyncPeriod": "-1",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "fullSyncPeriod": "-1",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "mappers": [
+ {
+ "config": {
+ "always.read.value.from.ldap": "false",
+ "is.mandatory.in.ldap": "false",
+ "ldap.attribute": "mail",
+ "read.only": "true",
+ "user.model.attribute": "email"
+ },
+ "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
+ "name": "email",
+ "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "providerId": "user-attribute-ldap-mapper",
+ "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ }
+ ],
+ "name": "myfed",
+ "parentId": "myrealm",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
end_state:
- description: Representation of user federation after module execution.
- returned: on success
- type: dict
- sample: {
- "config": {
- "allowPasswordAuthentication": "false",
- "cachePolicy": "DEFAULT",
- "enabled": "true",
- "kerberosRealm": "EXAMPLE.COM",
- "keyTab": "/etc/krb5.keytab",
- "priority": "0",
- "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
- "updateProfileFirstLogin": "false"
- },
- "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
- "mappers": [],
- "name": "kerberos",
- "parentId": "myrealm",
- "providerId": "kerberos",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of user federation after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowPasswordAuthentication": "false",
+ "cachePolicy": "DEFAULT",
+ "enabled": "true",
+ "kerberosRealm": "EXAMPLE.COM",
+ "keyTab": "/etc/krb5.keytab",
+ "priority": "0",
+ "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
+ "updateProfileFirstLogin": "false"
+ },
+ "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
+ "mappers": [],
+ "name": "kerberos",
+ "parentId": "myrealm",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -864,8 +841,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py
index 46a6e9e508..49d71e2ca9 100644
--- a/plugins/modules/keycloak_user_rolemapping.py
+++ b/plugins/modules/keycloak_user_rolemapping.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_user_rolemapping
short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API
@@ -16,107 +15,99 @@ short_description: Allows administration of Keycloak user_rolemapping with the K
version_added: 5.7.0
description:
- - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API.
- It requires access to the REST API via OpenID Connect; the user connecting and the client being
- used must have the requisite access rights. In a default Keycloak installation, admin-cli
- and an admin user would work, as would a separate client definition with the scope tailored
- to your needs and a user having the expected roles.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
-
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
- be returned that way by this module. You may pass single values for attributes when calling the module,
- and this will be translated into a list suitable for the API.
-
- - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup
- to the API to translate the name into the role ID.
-
+ - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. It requires access
+ to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
+ the scope tailored to your needs and a user having the expected roles.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
+ - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to
+ translate the name into the role ID.
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the user_rolemapping.
- - On V(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the user_rolemapping will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the user_rolemapping.
+ - On V(present), the user_rolemapping is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the user_rolemapping is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- realm:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ target_username:
+ type: str
+ description:
+ - Username of the user roles are mapped to.
+ - This parameter is not required (it can be replaced by O(uid) to reduce the number of API calls).
+ uid:
+ type: str
+ description:
+ - ID of the user to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ service_account_user_client_id:
+ type: str
+ description:
+ - Client ID of the service-account-user to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ client_id:
+ type: str
+ description:
+ - Name of the client to be mapped (different than O(cid)).
+ - This parameter is required if O(cid) is not provided (can be replaced by O(cid) to reduce the number of API calls
+ that must be made).
+ cid:
+ type: str
+ description:
+ - ID of the client to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
+ roles:
+ description:
+ - Roles to be mapped to the user.
+ type: list
+ elements: dict
+ suboptions:
+ name:
type: str
description:
- - They Keycloak realm under which this role_representation resides.
- default: 'master'
-
- target_username:
+ - Name of the role representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
type: str
description:
- - Username of the user roles are mapped to.
- - This parameter is not required (can be replaced by uid for less API call).
-
- uid:
- type: str
- description:
- - ID of the user to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- service_account_user_client_id:
- type: str
- description:
- - Client ID of the service-account-user to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- client_id:
- type: str
- description:
- - Name of the client to be mapped (different than O(cid)).
- - This parameter is required if O(cid) is not provided (can be replaced by O(cid)
- to reduce the number of API calls that must be made).
-
- cid:
- type: str
- description:
- - ID of the client to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but
- providing it will reduce the number of API calls required.
-
- roles:
- description:
- - Roles to be mapped to the user.
- type: list
- elements: dict
- suboptions:
- name:
- type: str
- description:
- - Name of the role representation.
- - This parameter is required only when creating or updating the role_representation.
- id:
- type: str
- description:
- - The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but
- providing it will reduce the number of API calls required.
-
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- - Dušan Marković (@bratwurzt)
-'''
+ - Dušan Marković (@bratwurzt)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Map a client role to a user, authentication with credentials
community.general.keycloak_user_rolemapping:
realm: MyCustomRealm
@@ -186,49 +177,49 @@ EXAMPLES = '''
- name: role_name2
id: role_id2
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: "Role role1 assigned to user user1."
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to user user1."
proposed:
- description: Representation of proposed client role mapping.
- returned: always
- type: dict
- sample: {
- clientId: "test"
- }
+ description: Representation of proposed client role mapping.
+ returned: always
+ type: dict
+ sample: {"clientId": "test"}
existing:
- description:
- - Representation of existing client role mapping.
- - The sample is truncated.
- returned: always
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of existing client role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
end_state:
- description:
- - Representation of client role mapping after module execution.
- - The sample is truncated.
- returned: on success
- type: dict
- sample: {
- "adminUrl": "http://www.example.com/admin_url",
- "attributes": {
- "request.object.signature.alg": "RS256",
- }
+ description:
+ - Representation of client role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
keycloak_argument_spec, get_token, KeycloakError
@@ -263,9 +254,11 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'],
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'],
['uid', 'target_username', 'service_account_user_client_id']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py
index 57e1c42e96..9760a17ecf 100644
--- a/plugins/modules/keycloak_userprofile.py
+++ b/plugins/modules/keycloak_userprofile.py
@@ -8,275 +8,274 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: keycloak_userprofile
short_description: Allows managing Keycloak User Profiles
description:
- - This module allows you to create, update, or delete Keycloak User Profiles via Keycloak API. You can also customize the "Unmanaged Attributes" with it.
-
- - The names of module options are snake_cased versions of the camelCase ones found in the
- Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html).
- For compatibility reasons, the module also accepts the camelCase versions of the options.
-
+ - This module allows you to create, update, or delete Keycloak User Profiles using the Keycloak API. You can also customize
+ the "Unmanaged Attributes" with it.
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
+ at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). For compatibility reasons, the module also accepts
+ the camelCase versions of the options.
version_added: "9.4.0"
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ action_group:
+ version_added: 10.2.0
options:
- state:
- description:
- - State of the User Profile provider.
- - On V(present), the User Profile provider will be created if it does not yet exist, or updated with
- the parameters you provide.
- - On V(absent), the User Profile provider will be removed if it exists.
- default: 'present'
- type: str
- choices:
- - present
- - absent
+ state:
+ description:
+ - State of the User Profile provider.
+ - On V(present), the User Profile provider is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the User Profile provider is removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
- parent_id:
+ parent_id:
+ description:
+ - The parent ID of the realm key. In practice the ID (name) of the realm.
+ aliases:
+ - parentId
+ - realm
+ type: str
+ required: true
+
+ provider_id:
+ description:
+ - The name of the provider ID for the key (supported value is V(declarative-user-profile)).
+ aliases:
+ - providerId
+ choices: ['declarative-user-profile']
+ default: 'declarative-user-profile'
+ type: str
+
+ provider_type:
+ description:
+ - Component type for User Profile (only supported value is V(org.keycloak.userprofile.UserProfileProvider)).
+ aliases:
+ - providerType
+ choices: ['org.keycloak.userprofile.UserProfileProvider']
+ default: org.keycloak.userprofile.UserProfileProvider
+ type: str
+
+ config:
+ description:
+ - The configuration of the User Profile Provider.
+ type: dict
+ required: false
+ suboptions:
+ kc_user_profile_config:
description:
- - The parent ID of the realm key. In practice the ID (name) of the realm.
+ - Define a declarative User Profile. See EXAMPLES for more context.
aliases:
- - parentId
- - realm
- type: str
- required: true
-
- provider_id:
- description:
- - The name of the provider ID for the key (supported value is V(declarative-user-profile)).
- aliases:
- - providerId
- choices: ['declarative-user-profile']
- default: 'declarative-user-profile'
- type: str
-
- provider_type:
- description:
- - Component type for User Profile (only supported value is V(org.keycloak.userprofile.UserProfileProvider)).
- aliases:
- - providerType
- choices: ['org.keycloak.userprofile.UserProfileProvider']
- default: org.keycloak.userprofile.UserProfileProvider
- type: str
-
- config:
- description:
- - The configuration of the User Profile Provider.
- type: dict
- required: false
+ - kcUserProfileConfig
+ type: list
+ elements: dict
suboptions:
- kc_user_profile_config:
+ attributes:
+ description:
+ - A list of attributes to be included in the User Profile.
+ type: list
+ elements: dict
+ suboptions:
+ name:
description:
- - Define a declarative User Profile. See EXAMPLES for more context.
+ - The name of the attribute.
+ type: str
+ required: true
+
+ display_name:
+ description:
+ - The display name of the attribute.
aliases:
- - kcUserProfileConfig
- type: list
- elements: dict
+ - displayName
+ type: str
+ required: true
+
+ validations:
+ description:
+ - The validations to be applied to the attribute.
+ type: dict
suboptions:
- attributes:
+ length:
+ description:
+ - The length validation for the attribute.
+ type: dict
+ suboptions:
+ min:
description:
- - A list of attributes to be included in the User Profile.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The name of the attribute.
- type: str
- required: true
-
- display_name:
- description:
- - The display name of the attribute.
- aliases:
- - displayName
- type: str
- required: true
-
- validations:
- description:
- - The validations to be applied to the attribute.
- type: dict
- suboptions:
- length:
- description:
- - The length validation for the attribute.
- type: dict
- suboptions:
- min:
- description:
- - The minimum length of the attribute.
- type: int
- max:
- description:
- - The maximum length of the attribute.
- type: int
- required: true
-
- email:
- description:
- - The email validation for the attribute.
- type: dict
-
- username_prohibited_characters:
- description:
- - The prohibited characters validation for the username attribute.
- type: dict
- aliases:
- - usernameProhibitedCharacters
-
- up_username_not_idn_homograph:
- description:
- - The validation to prevent IDN homograph attacks in usernames.
- type: dict
- aliases:
- - upUsernameNotIdnHomograph
-
- person_name_prohibited_characters:
- description:
- - The prohibited characters validation for person name attributes.
- type: dict
- aliases:
- - personNameProhibitedCharacters
-
- uri:
- description:
- - The URI validation for the attribute.
- type: dict
-
- pattern:
- description:
- - The pattern validation for the attribute using regular expressions.
- type: dict
-
- options:
- description:
- - Validation to ensure the attribute matches one of the provided options.
- type: dict
-
- annotations:
- description:
- - Annotations for the attribute.
- type: dict
-
- group:
- description:
- - Specifies the User Profile group where this attribute will be added.
- type: str
-
- permissions:
- description:
- - The permissions for viewing and editing the attribute.
- type: dict
- suboptions:
- view:
- description:
- - The roles that can view the attribute.
- - Supported values are V(admin) and V(user).
- type: list
- elements: str
- default:
- - admin
- - user
-
- edit:
- description:
- - The roles that can edit the attribute.
- - Supported values are V(admin) and V(user).
- type: list
- elements: str
- default:
- - admin
- - user
-
- multivalued:
- description:
- - Whether the attribute can have multiple values.
- type: bool
- default: false
-
- required:
- description:
- - The roles that require this attribute.
- type: dict
- suboptions:
- roles:
- description:
- - The roles for which this attribute is required.
- - Supported values are V(admin) and V(user).
- type: list
- elements: str
- default:
- - user
-
- groups:
+ - The minimum length of the attribute.
+ type: int
+ max:
description:
- - A list of attribute groups to be included in the User Profile.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The name of the group.
- type: str
- required: true
+ - The maximum length of the attribute.
+ type: int
+ required: true
- display_header:
- description:
- - The display header for the group.
- aliases:
- - displayHeader
- type: str
- required: true
+ email:
+ description:
+ - The email validation for the attribute.
+ type: dict
- display_description:
- description:
- - The display description for the group.
- aliases:
- - displayDescription
- type: str
- required: false
+ username_prohibited_characters:
+ description:
+ - The prohibited characters validation for the username attribute.
+ type: dict
+ aliases:
+ - usernameProhibitedCharacters
- annotations:
- description:
- - The annotations included in the group.
- type: dict
- required: false
+ up_username_not_idn_homograph:
+ description:
+ - The validation to prevent IDN homograph attacks in usernames.
+ type: dict
+ aliases:
+ - upUsernameNotIdnHomograph
- unmanaged_attribute_policy:
- description:
- - Policy for unmanaged attributes.
- aliases:
- - unmanagedAttributePolicy
- type: str
- choices:
- - ENABLED
- - ADMIN_EDIT
- - ADMIN_VIEW
+ person_name_prohibited_characters:
+ description:
+ - The prohibited characters validation for person name attributes.
+ type: dict
+ aliases:
+ - personNameProhibitedCharacters
+
+ uri:
+ description:
+ - The URI validation for the attribute.
+ type: dict
+
+ pattern:
+ description:
+ - The pattern validation for the attribute using regular expressions.
+ type: dict
+
+ options:
+ description:
+ - Validation to ensure the attribute matches one of the provided options.
+ type: dict
+
+ annotations:
+ description:
+ - Annotations for the attribute.
+ type: dict
+
+ group:
+ description:
+ - Specifies the User Profile group where this attribute is added.
+ type: str
+
+ permissions:
+ description:
+ - The permissions for viewing and editing the attribute.
+ type: dict
+ suboptions:
+ view:
+ description:
+ - The roles that can view the attribute.
+ - Supported values are V(admin) and V(user).
+ type: list
+ elements: str
+ default:
+ - admin
+ - user
+
+ edit:
+ description:
+ - The roles that can edit the attribute.
+ - Supported values are V(admin) and V(user).
+ type: list
+ elements: str
+ default:
+ - admin
+ - user
+
+ multivalued:
+ description:
+ - Whether the attribute can have multiple values.
+ type: bool
+ default: false
+
+ required:
+ description:
+ - The roles that require this attribute.
+ type: dict
+ suboptions:
+ roles:
+ description:
+ - The roles for which this attribute is required.
+ - Supported values are V(admin) and V(user).
+ type: list
+ elements: str
+ default:
+ - user
+
+ groups:
+ description:
+ - A list of attribute groups to be included in the User Profile.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the group.
+ type: str
+ required: true
+
+ display_header:
+ description:
+ - The display header for the group.
+ aliases:
+ - displayHeader
+ type: str
+ required: true
+
+ display_description:
+ description:
+ - The display description for the group.
+ aliases:
+ - displayDescription
+ type: str
+ required: false
+
+ annotations:
+ description:
+ - The annotations included in the group.
+ type: dict
+ required: false
+
+ unmanaged_attribute_policy:
+ description:
+ - Policy for unmanaged attributes.
+ aliases:
+ - unmanagedAttributePolicy
+ type: str
+ choices:
+ - ENABLED
+ - ADMIN_EDIT
+ - ADMIN_VIEW
notes:
- - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keyckoak API).
- However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries.
-
+ - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keycloak API).
+ However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries.
extends_documentation_fragment:
- - community.general.keycloak
- - community.general.attributes
+ - community.general.keycloak
+ - community.general.keycloak.actiongroup_keycloak
+ - community.general.attributes
author:
- Eike Waldt (@yeoldegrove)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a Declarative User Profile with default settings
community.general.keycloak_userprofile:
state: present
@@ -394,9 +393,9 @@ EXAMPLES = '''
config:
kc_user_profile_config:
- unmanagedAttributePolicy: ADMIN_VIEW
-'''
+"""
-RETURN = '''
+RETURN = r"""
msg:
description: The output message generated by the module.
returned: always
@@ -406,8 +405,7 @@ data:
description: The data returned by the Keycloak API.
returned: when state is present
type: dict
- sample: {...}
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
@@ -425,7 +423,7 @@ def remove_null_values(data):
# Recursively remove null values from lists
return [remove_null_values(item) for item in data if item is not None]
else:
- # Return the data if it's neither a dictionary nor a list
+ # Return the data if it is neither a dictionary nor a list
return data
@@ -437,7 +435,7 @@ def camel_recursive(data):
# Apply camelCase conversion to each item in the list
return [camel_recursive(item) for item in data]
else:
- # Return the data as is if it's not a dict or list
+ # Return the data as-is if it is not a dict or list
return data
@@ -456,7 +454,6 @@ def main():
),
config=dict(
type='dict',
- required=False,
options={
'kc_user_profile_config': dict(
type='list',
@@ -466,7 +463,6 @@ def main():
'attributes': dict(
type='list',
elements='dict',
- required=False,
options={
'name': dict(type='str', required=True),
'display_name': dict(type='str', aliases=['displayName'], required=True),
@@ -476,17 +472,17 @@ def main():
'length': dict(
type='dict',
options={
- 'min': dict(type='int', required=False),
+ 'min': dict(type='int'),
'max': dict(type='int', required=True)
}
),
- 'email': dict(type='dict', required=False),
- 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters'], required=False),
- 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph'], required=False),
- 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters'], required=False),
- 'uri': dict(type='dict', required=False),
- 'pattern': dict(type='dict', required=False),
- 'options': dict(type='dict', required=False)
+ 'email': dict(type='dict'),
+ 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']),
+ 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']),
+ 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']),
+ 'uri': dict(type='dict'),
+ 'pattern': dict(type='dict'),
+ 'options': dict(type='dict')
}
),
'annotations': dict(type='dict'),
@@ -513,15 +509,15 @@ def main():
options={
'name': dict(type='str', required=True),
'display_header': dict(type='str', aliases=['displayHeader'], required=True),
- 'display_description': dict(type='str', aliases=['displayDescription'], required=False),
- 'annotations': dict(type='dict', required=False)
+ 'display_description': dict(type='str', aliases=['displayDescription']),
+ 'annotations': dict(type='dict')
}
),
'unmanaged_attribute_policy': dict(
type='str',
aliases=['unmanagedAttributePolicy'],
choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'],
- required=False
+
)
}
)
@@ -533,8 +529,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
+ required_by={'refresh_token': 'auth_realm'},
+ )
# Initialize the result object. Only "changed" seems to have special
# meaning for Ansible.
diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py
index 8329b727bd..eef59dd10a 100644
--- a/plugins/modules/keyring.py
+++ b/plugins/modules/keyring.py
@@ -13,15 +13,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
----
module: keyring
version_added: 5.2.0
author:
- Alexander Hussey (@ahussey-redhat)
short_description: Set or delete a passphrase using the Operating System's native keyring
description: >-
- This module uses the L(keyring Python library, https://pypi.org/project/keyring/)
- to set or delete passphrases for a given service and username from the OS' native keyring.
+ This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to set or delete passphrases for a given
+ service and username from the OS' native keyring.
requirements:
- keyring (Python library)
- gnome-keyring (application - required for headless Gnome keyring access)
@@ -207,10 +206,10 @@ def run_module():
username=dict(type="str", required=True),
keyring_password=dict(type="str", required=True, no_log=True),
user_password=dict(
- type="str", required=False, no_log=True, aliases=["password"]
+ type="str", no_log=True, aliases=["password"]
),
state=dict(
- type="str", required=False, default="present", choices=["absent", "present"]
+ type="str", default="present", choices=["absent", "present"]
),
)
diff --git a/plugins/modules/keyring_info.py b/plugins/modules/keyring_info.py
index 5c41ecc4d0..836ecafdde 100644
--- a/plugins/modules/keyring_info.py
+++ b/plugins/modules/keyring_info.py
@@ -13,15 +13,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
----
module: keyring_info
version_added: 5.2.0
author:
- Alexander Hussey (@ahussey-redhat)
short_description: Get a passphrase using the Operating System's native keyring
description: >-
- This module uses the L(keyring Python library, https://pypi.org/project/keyring/)
- to retrieve passphrases for a given service and username from the OS' native keyring.
+ This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to retrieve passphrases for a given service
+ and username from the OS' native keyring.
requirements:
- keyring (Python library)
- gnome-keyring (application - required for headless Linux keyring access)
@@ -45,24 +44,24 @@ options:
"""
EXAMPLES = r"""
- - name: Retrieve password for service_name/user_name
- community.general.keyring_info:
- service: test
- username: test1
- keyring_password: "{{ keyring_password }}"
- register: test_password
+- name: Retrieve password for service_name/user_name
+ community.general.keyring_info:
+ service: test
+ username: test1
+ keyring_password: "{{ keyring_password }}"
+ register: test_password
- - name: Display password
- ansible.builtin.debug:
- msg: "{{ test_password.passphrase }}"
+- name: Display password
+ ansible.builtin.debug:
+ msg: "{{ test_password.passphrase }}"
"""
RETURN = r"""
- passphrase:
- description: A string containing the password.
- returned: success and the password exists
- type: str
- sample: Password123
+passphrase:
+ description: A string containing the password.
+ returned: success and the password exists
+ type: str
+ sample: Password123
"""
try:
diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py
index f6744b3960..b975e2dcea 100644
--- a/plugins/modules/kibana_plugin.py
+++ b/plugins/modules/kibana_plugin.py
@@ -11,71 +11,70 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- - This module can be used to manage Kibana plugins.
+ - This module can be used to manage Kibana plugins.
author: Thierno IB. BARRY (@barryib)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
+ name:
+ description:
- Name of the plugin to install.
- required: true
- type: str
- state:
- description:
+ required: true
+ type: str
+ state:
+ description:
- Desired state of a plugin.
- choices: ["present", "absent"]
- default: present
- type: str
- url:
- description:
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ url:
+ description:
- Set exact URL to download the plugin from.
- - For local file, prefix its absolute path with file://
- type: str
- timeout:
- description:
- - "Timeout setting: 30s, 1m, 1h etc."
- default: 1m
- type: str
- plugin_bin:
- description:
+ - For local file, prefix its absolute path with C(file://).
+ type: str
+ timeout:
+ description:
+ - 'Timeout setting: V(30s), V(1m), V(1h) and so on.'
+ default: 1m
+ type: str
+ plugin_bin:
+ description:
- Location of the Kibana binary.
- default: /opt/kibana/bin/kibana
- type: path
- plugin_dir:
- description:
+ default: /opt/kibana/bin/kibana
+ type: path
+ plugin_dir:
+ description:
- Your configured plugin directory specified in Kibana.
- default: /opt/kibana/installedPlugins/
- type: path
- version:
- description:
+ default: /opt/kibana/installedPlugins/
+ type: path
+ version:
+ description:
- Version of the plugin to be installed.
- - If plugin exists with previous version, plugin will B(not) be updated unless O(force) is set to V(true).
- type: str
- force:
- description:
- - Delete and re-install the plugin. Can be useful for plugins update.
- type: bool
- default: false
- allow_root:
- description:
 + - If the plugin is installed in a previous version, it is B(not) updated unless O(force=true).
+ type: str
+ force:
+ description:
 + - Delete and re-install the plugin. It can be useful for plugin updates.
+ type: bool
+ default: false
+ allow_root:
+ description:
- Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands.
- type: bool
- default: false
- version_added: 2.3.0
-'''
+ type: bool
+ default: false
+ version_added: 2.3.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install Elasticsearch head plugin
community.general.kibana_plugin:
state: present
@@ -91,38 +90,30 @@ EXAMPLES = '''
community.general.kibana_plugin:
state: absent
name: elasticsearch/marvel
-'''
+"""
-RETURN = '''
+RETURN = r"""
cmd:
- description: the launched command during plugin management (install / remove)
- returned: success
- type: str
+ description: The launched command during plugin management (install / remove).
+ returned: success
+ type: str
name:
- description: the plugin name to install or remove
- returned: success
- type: str
+ description: The plugin name to install or remove.
+ returned: success
+ type: str
url:
- description: the url from where the plugin is installed from
- returned: success
- type: str
 + description: The URL from where the plugin is installed.
+ returned: success
+ type: str
timeout:
- description: the timeout for plugin download
- returned: success
- type: str
-stdout:
- description: the command stdout
- returned: success
- type: str
-stderr:
- description: the command stderr
- returned: success
- type: str
+ description: The timeout for plugin download.
+ returned: success
+ type: str
state:
- description: the state for the managed plugin
- returned: success
- type: str
-'''
+ description: The state for the managed plugin.
+ returned: success
+ type: str
+"""
import os
from ansible.module_utils.basic import AnsibleModule
@@ -237,11 +228,11 @@ def main():
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
- url=dict(default=None),
+ url=dict(),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
- version=dict(default=None),
+ version=dict(),
force=dict(default=False, type="bool"),
allow_root=dict(default=False, type="bool"),
),
diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py
new file mode 100644
index 0000000000..3a01944535
--- /dev/null
+++ b/plugins/modules/krb_ticket.py
@@ -0,0 +1,383 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2024 Alexander Bakanovskii
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: krb_ticket
+short_description: Kerberos utils for managing tickets
+version_added: 10.0.0
+description:
+ - Manage Kerberos tickets with C(kinit), C(klist) and C(kdestroy) base utilities.
+ - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/user/user_commands/index.html) for reference.
+author: "Alexander Bakanovskii (@abakanovskii)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ password:
+ description:
+ - Principal password.
+ - It is required to specify O(password) or O(keytab_path).
+ type: str
+ principal:
+ description:
+ - The principal name.
+ - If not set, the user running this module is used.
+ type: str
+ state:
+ description:
+ - The state of the Kerberos ticket.
+ - V(present) is equivalent of C(kinit) command.
+ - V(absent) is equivalent of C(kdestroy) command.
+ type: str
+ default: present
+ choices: ["present", "absent"]
+ kdestroy_all:
+ description:
+ - When O(state=absent) destroys all credential caches in collection.
+ - Equivalent of running C(kdestroy -A).
+ type: bool
+ cache_name:
+ description:
+ - Use O(cache_name) as the ticket cache name and location.
+ - If this option is not used, the default cache name and location are used.
+ - The default credentials cache may vary between systems.
 + - If not set, the value of the E(KRB5CCNAME) environment variable is used instead, its value is used to name the default
+ ticket cache.
+ type: str
+ lifetime:
+ description:
+ - Requests a ticket with the lifetime, if the O(lifetime) is not specified, the default ticket lifetime is used.
+ - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) does not override the
+ configured maximum ticket lifetime.
+ - 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h)
+ - hours, V(d) - days.'
+ - You cannot mix units; a value of V(3h30m) results in an error.
+ - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+ type: str
+ start_time:
+ description:
+ - Requests a postdated ticket.
+ - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before
+ use.
+ - O(start_time) specifies the duration of the delay before the ticket can become valid.
 + - You can use absolute time formats, for example V(July 27, 2012 at 20:30) you would need to set O(start_time=20120727203000).
+ - You can also use time duration format similar to O(lifetime) or O(renewable).
+ - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+ type: str
+ renewable:
+ description:
+ - Requests renewable tickets, with a total lifetime equal to O(renewable).
+ - 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h)
+ - hours, V(d) - days.'
+ - You cannot mix units; a value of V(3h30m) results in an error.
+ - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
+ type: str
+ forwardable:
+ description:
+ - Request forwardable or non-forwardable tickets.
+ type: bool
+ proxiable:
+ description:
+ - Request proxiable or non-proxiable tickets.
+ type: bool
+ address_restricted:
+ description:
+ - Request tickets restricted to the host's local address or non-restricted.
+ type: bool
+ anonymous:
+ description:
+ - Requests anonymous processing.
+ type: bool
+ canonicalization:
+ description:
+ - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from
+ the one requested.
+ type: bool
+ enterprise:
+ description:
+ - Treats the principal name as an enterprise name (implies the O(canonicalization) option).
+ type: bool
+ renewal:
+ description:
+ - Requests renewal of the ticket-granting ticket.
+ - Note that an expired ticket cannot be renewed, even if the ticket is still within its renewable life.
+ type: bool
+ validate:
+ description:
+ - Requests that the ticket-granting ticket in the cache (with the invalid flag set) be passed to the KDC for validation.
+ - If the ticket is within its requested time range, the cache is replaced with the validated ticket.
+ type: bool
+ keytab:
+ description:
+ - Requests a ticket, obtained from a key in the local host's keytab.
+ - If O(keytab_path) is not specified it tries to use default client keytab path (C(-i) option).
+ type: bool
+ keytab_path:
+ description:
+ - Use when O(keytab=true) to specify path to a keytab file.
+ - It is required to specify O(password) or O(keytab_path).
+ type: path
+requirements:
+ - krb5-user and krb5-config packages
+extends_documentation_fragment:
+ - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Get Kerberos ticket using default principal
+ community.general.krb_ticket:
+ password: some_password
+
+- name: Get Kerberos ticket using keytab
+ community.general.krb_ticket:
+ keytab: true
+ keytab_path: /etc/ipa/file.keytab
+
+- name: Get Kerberos ticket with a lifetime of 7 days
+ community.general.krb_ticket:
+ password: some_password
+ lifetime: 7d
+
+- name: Get Kerberos ticket with a starting time of July 2, 2024, 1:35:30 p.m.
+ community.general.krb_ticket:
+ password: some_password
+ start_time: "240702133530"
+
+- name: Get Kerberos ticket using principal name
+ community.general.krb_ticket:
+ password: some_password
+ principal: admin
+
+- name: Get Kerberos ticket using principal with realm
+ community.general.krb_ticket:
+ password: some_password
+ principal: admin@IPA.TEST
+
+- name: Check for existence by ticket cache
+ community.general.krb_ticket:
+ cache_name: KEYRING:persistent:0:0
+
+- name: Make sure default ticket is destroyed
+ community.general.krb_ticket:
+ state: absent
+
+- name: Make sure specific ticket destroyed by principal
+ community.general.krb_ticket:
+ state: absent
+ principal: admin@IPA.TEST
+
+- name: Make sure specific ticket destroyed by cache_name
+ community.general.krb_ticket:
+ state: absent
+ cache_name: KEYRING:persistent:0:0
+
+- name: Make sure all tickets are destroyed
+ community.general.krb_ticket:
+ state: absent
+ kdestroy_all: true
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+class IPAKeytab(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.password = kwargs['password']
+ self.principal = kwargs['principal']
+ self.state = kwargs['state']
+ self.kdestroy_all = kwargs['kdestroy_all']
+ self.cache_name = kwargs['cache_name']
+ self.start_time = kwargs['start_time']
+ self.renewable = kwargs['renewable']
+ self.forwardable = kwargs['forwardable']
+ self.proxiable = kwargs['proxiable']
+ self.address_restricted = kwargs['address_restricted']
+ self.canonicalization = kwargs['canonicalization']
+ self.enterprise = kwargs['enterprise']
+ self.renewal = kwargs['renewal']
+ self.validate = kwargs['validate']
+ self.keytab = kwargs['keytab']
+ self.keytab_path = kwargs['keytab_path']
+
+ self.kinit = CmdRunner(
+ module,
+ command='kinit',
+ arg_formats=dict(
+ lifetime=cmd_runner_fmt.as_opt_val('-l'),
+ start_time=cmd_runner_fmt.as_opt_val('-s'),
+ renewable=cmd_runner_fmt.as_opt_val('-r'),
+ forwardable=cmd_runner_fmt.as_bool('-f', '-F', ignore_none=True),
+ proxiable=cmd_runner_fmt.as_bool('-p', '-P', ignore_none=True),
+ address_restricted=cmd_runner_fmt.as_bool('-a', '-A', ignore_none=True),
+ anonymous=cmd_runner_fmt.as_bool('-n'),
+ canonicalization=cmd_runner_fmt.as_bool('-C'),
+ enterprise=cmd_runner_fmt.as_bool('-E'),
+ renewal=cmd_runner_fmt.as_bool('-R'),
+ validate=cmd_runner_fmt.as_bool('-v'),
+ keytab=cmd_runner_fmt.as_bool('-k'),
+ keytab_path=cmd_runner_fmt.as_func(lambda v: ['-t', v] if v else ['-i']),
+ cache_name=cmd_runner_fmt.as_opt_val('-c'),
+ principal=cmd_runner_fmt.as_list(),
+ )
+ )
+
+ self.kdestroy = CmdRunner(
+ module,
+ command='kdestroy',
+ arg_formats=dict(
+ kdestroy_all=cmd_runner_fmt.as_bool('-A'),
+ cache_name=cmd_runner_fmt.as_opt_val('-c'),
+ principal=cmd_runner_fmt.as_opt_val('-p'),
+ )
+ )
+
+ self.klist = CmdRunner(
+ module,
+ command='klist',
+ arg_formats=dict(
+ show_list=cmd_runner_fmt.as_bool('-l'),
+ )
+ )
+
+ def exec_kinit(self):
+ params = dict(self.module.params)
+ with self.kinit(
+ "lifetime start_time renewable forwardable proxiable address_restricted anonymous "
+ "canonicalization enterprise renewal validate keytab keytab_path cache_name principal",
+ check_rc=True,
+ data=self.password,
+ ) as ctx:
+ rc, out, err = ctx.run(**params)
+ return out
+
+ def exec_kdestroy(self):
+ params = dict(self.module.params)
+ with self.kdestroy(
+ "kdestroy_all cache_name principal",
+ check_rc=True
+ ) as ctx:
+ rc, out, err = ctx.run(**params)
+ return out
+
+ def exec_klist(self, show_list):
 + # Use check_rc = False because
+ # If no tickets present, klist command will always return rc = 1
+ params = dict(show_list=show_list)
+ with self.klist(
+ "show_list",
+ check_rc=False
+ ) as ctx:
+ rc, out, err = ctx.run(**params)
+ return rc, out, err
+
+ def check_ticket_present(self):
+ ticket_present = True
+ show_list = False
+
+ if not self.principal and not self.cache_name:
+ rc, out, err = self.exec_klist(show_list)
+ if rc != 0:
+ ticket_present = False
+ else:
+ show_list = True
+ rc, out, err = self.exec_klist(show_list)
+ if self.principal and self.principal not in str(out):
+ ticket_present = False
+ if self.cache_name and self.cache_name not in str(out):
+ ticket_present = False
+
+ return ticket_present
+
+
+def main():
+ arg_spec = dict(
+ principal=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ kdestroy_all=dict(type='bool'),
+ cache_name=dict(type='str', fallback=(env_fallback, ['KRB5CCNAME'])),
+ lifetime=dict(type='str'),
+ start_time=dict(type='str'),
+ renewable=dict(type='str'),
+ forwardable=dict(type='bool'),
+ proxiable=dict(type='bool'),
+ address_restricted=dict(type='bool'),
+ anonymous=dict(type='bool'),
+ canonicalization=dict(type='bool'),
+ enterprise=dict(type='bool'),
+ renewal=dict(type='bool'),
+ validate=dict(type='bool'),
+ keytab=dict(type='bool'),
+ keytab_path=dict(type='path'),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_by={
+ 'keytab_path': 'keytab'
+ },
+ required_if=[
+ ('state', 'present', ('password', 'keytab_path'), True),
+ ],
+ )
+
+ state = module.params['state']
+ kdestroy_all = module.params['kdestroy_all']
+
+ keytab = IPAKeytab(module,
+ state=state,
+ kdestroy_all=kdestroy_all,
+ principal=module.params['principal'],
+ password=module.params['password'],
+ cache_name=module.params['cache_name'],
+ lifetime=module.params['lifetime'],
+ start_time=module.params['start_time'],
+ renewable=module.params['renewable'],
+ forwardable=module.params['forwardable'],
+ proxiable=module.params['proxiable'],
+ address_restricted=module.params['address_restricted'],
+ anonymous=module.params['anonymous'],
+ canonicalization=module.params['canonicalization'],
+ enterprise=module.params['enterprise'],
+ renewal=module.params['renewal'],
+ validate=module.params['validate'],
+ keytab=module.params['keytab'],
+ keytab_path=module.params['keytab_path'],
+ )
+
+ if module.params['keytab_path'] is not None and module.params['keytab'] is not True:
+ module.fail_json(msg="If keytab_path is specified then keytab parameter must be True")
+
+ changed = False
+ if state == 'present':
+ if not keytab.check_ticket_present():
+ changed = True
+ if not module.check_mode:
+ keytab.exec_kinit()
+
+ if state == 'absent':
+ if kdestroy_all:
+ changed = True
+ if not module.check_mode:
+ keytab.exec_kdestroy()
+ elif keytab.check_ticket_present():
+ changed = True
+ if not module.check_mode:
+ keytab.exec_kdestroy()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py
index a6427bdb2f..310e1af9b1 100644
--- a/plugins/modules/launchd.py
+++ b/plugins/modules/launchd.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: launchd
author:
- Martin Migasiewicz (@martinm82)
@@ -20,51 +19,53 @@ description:
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
+ name:
+ description:
- Name of the service.
- type: str
- required: true
- state:
- description:
- - V(started)/V(stopped) are idempotent actions that will not run
- commands unless necessary.
- - Launchd does not support V(restarted) nor V(reloaded) natively.
- These will trigger a stop/start (restarted) or an unload/load
- (reloaded).
- - V(restarted) unloads and loads the service before start to ensure
- that the latest job definition (plist) is used.
- - V(reloaded) unloads and loads the service to ensure that the latest
- job definition (plist) is used. Whether a service is started or
- stopped depends on the content of the definition file.
- type: str
- choices: [ reloaded, restarted, started, stopped, unloaded ]
- enabled:
- description:
+ type: str
+ required: true
+ plist:
+ description:
+ - Name of the V(.plist) file for the service.
+ - Defaults to V({name}.plist).
+ type: str
+ version_added: 10.1.0
+ state:
+ description:
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - C(launchd) does not support V(restarted) nor V(reloaded) natively. These states trigger a stop/start (restarted) or
+ an unload/load (reloaded).
+ - V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used.
+ - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service
+ is started or stopped depends on the content of the definition file.
+ type: str
+ choices: [reloaded, restarted, started, stopped, unloaded]
+ enabled:
+ description:
- Whether the service should start on boot.
- - B(At least one of state and enabled are required.)
- type: bool
- force_stop:
- description:
+ - B(At least one of state and enabled are required).
+ type: bool
+ force_stop:
+ description:
- Whether the service should not be restarted automatically by launchd.
- - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
- In case this is set to true, stopping a service will cause that launchd starts the service again.
- - Set this option to V(true) to let this module change the 'KeepAlive' attribute to V(false).
- type: bool
- default: false
+ - Services might have the C(KeepAlive) attribute set to V(true) in a launchd configuration. In case this is set to V(true),
+ stopping a service causes that C(launchd) starts the service again.
+ - Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false).
+ type: bool
+ default: false
notes:
-- A user must privileged to manage services using this module.
 + - A user must be privileged to manage services using this module.
requirements:
-- A system managed by launchd
-- The plistlib python library
-'''
+ - A system managed by launchd
+ - The plistlib Python library
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Make sure spotify webhelper is started
community.general.launchd:
name: com.spotify.webhelper
@@ -100,21 +101,27 @@ EXAMPLES = r'''
community.general.launchd:
name: org.memcached
state: unloaded
-'''
-RETURN = r'''
+- name: restart sshd
+ community.general.launchd:
+ name: com.openssh.sshd
+ plist: ssh.plist
+ state: restarted
+"""
+
+RETURN = r"""
status:
- description: Metadata about service status
- returned: always
- type: dict
- sample:
- {
- "current_pid": "-",
- "current_state": "stopped",
- "previous_pid": "82636",
- "previous_state": "running"
- }
-'''
+ description: Metadata about service status.
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+"""
import os
import plistlib
@@ -145,25 +152,31 @@ class ServiceState:
class Plist:
- def __init__(self, module, service):
+ def __init__(self, module, service, filename=None):
self.__changed = False
self.__service = service
+ if filename is not None:
+ self.__filename = filename
+ else:
+ self.__filename = '%s.plist' % service
state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run()
# Check if readPlist is available or not
self.old_plistlib = hasattr(plistlib, 'readPlist')
- self.__file = self.__find_service_plist(self.__service)
+ self.__file = self.__find_service_plist(self.__filename)
if self.__file is None:
- msg = 'Unable to infer the path of %s service plist file' % self.__service
+ msg = 'Unable to find the plist file %s for service %s' % (
+ self.__filename, self.__service,
+ )
if pid is None and state == ServiceState.UNLOADED:
msg += ' and it was not found among active services'
module.fail_json(msg=msg)
self.__update(module)
@staticmethod
- def __find_service_plist(service_name):
+ def __find_service_plist(filename):
"""Finds the plist file associated with a service"""
launchd_paths = [
@@ -180,7 +193,6 @@ class Plist:
except OSError:
continue
- filename = '%s.plist' % service_name
if filename in files:
return os.path.join(path, filename)
return None
@@ -461,6 +473,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
+ plist=dict(type='str'),
state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
enabled=dict(type='bool'),
force_stop=dict(type='bool', default=False),
@@ -472,6 +485,7 @@ def main():
)
service = module.params['name']
+ plist_filename = module.params['plist']
action = module.params['state']
rc = 0
out = err = ''
@@ -483,7 +497,7 @@ def main():
# We will tailor the plist file in case one of the options
# (enabled, force_stop) was specified.
- plist = Plist(module, service)
+ plist = Plist(module, service, plist_filename)
result['changed'] = plist.is_changed()
# Gather information about the service to be controlled.
diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py
index 13d514274b..b19428d9f9 100644
--- a/plugins/modules/layman.py
+++ b/plugins/modules/layman.py
@@ -10,14 +10,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: layman
author: "Jakub Jirutka (@jirutka)"
short_description: Manage Gentoo overlays
description:
- - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux.
- Please note that Layman must be installed on a managed node prior using this module.
 + - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman
+ must be installed on a managed node prior using this module.
requirements:
- layman python module
extends_documentation_fragment:
@@ -30,15 +29,14 @@ attributes:
options:
name:
description:
- - The overlay id to install, synchronize, or uninstall.
- Use 'ALL' to sync all of the installed overlays (can be used only when O(state=updated)).
+ - The overlay ID to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used
+ only when O(state=updated)).
required: true
type: str
list_url:
description:
- - An URL of the alternative overlays list that defines the overlay to install.
- This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
- C(overlay_defs) is read from the Layman's configuration.
+ - A URL of the alternative overlays list that defines the overlay to install. This list is fetched and saved under
+ C(${overlay_defs}/${name}.xml), where C(overlay_defs) is read from the Layman's configuration.
aliases: [url]
type: str
state:
@@ -49,14 +47,12 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be
- set to V(false) when no other option exists. Prior to 1.9.3 the code
- defaulted to V(false).
+ - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
type: bool
default: true
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install the overlay mozilla which is on the central overlays list
community.general.layman:
name: mozilla
@@ -81,7 +77,7 @@ EXAMPLES = '''
community.general.layman:
name: cvut
state: absent
-'''
+"""
import shutil
import traceback
@@ -240,7 +236,7 @@ def main():
name=dict(required=True),
list_url=dict(aliases=['url']),
state=dict(default="present", choices=['present', 'absent', 'updated']),
- validate_certs=dict(required=False, default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True
)
diff --git a/plugins/modules/lbu.py b/plugins/modules/lbu.py
index c961b6060d..e91fd5e01a 100644
--- a/plugins/modules/lbu.py
+++ b/plugins/modules/lbu.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: lbu
short_description: Local Backup Utility for Alpine Linux
@@ -17,8 +16,7 @@ short_description: Local Backup Utility for Alpine Linux
version_added: '0.2.0'
description:
- - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode
-
+ - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode.
extends_documentation_fragment:
- community.general.attributes
@@ -31,24 +29,24 @@ attributes:
options:
commit:
description:
- - Control whether to commit changed files.
+ - Control whether to commit changed files.
type: bool
exclude:
description:
- - List of paths to exclude.
+ - List of paths to exclude.
type: list
elements: str
include:
description:
- - List of paths to include.
+ - List of paths to include.
type: list
elements: str
author:
- Kaarle Ritvanen (@kunkku)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Commit changed files (if any)
- name: Commit
community.general.lbu:
@@ -59,22 +57,22 @@ EXAMPLES = '''
community.general.lbu:
commit: true
exclude:
- - /etc/opt
+ - /etc/opt
# Include paths without committing
- name: Include file and directory
community.general.lbu:
include:
- - /root/.ssh/authorized_keys
- - /var/lib/misc
-'''
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Error message
+ description: Error message.
type: str
returned: on failure
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py
index 7986833a6e..592da93a63 100644
--- a/plugins/modules/ldap_attrs.py
+++ b/plugins/modules/ldap_attrs.py
@@ -12,27 +12,17 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: ldap_attrs
short_description: Add or remove multiple LDAP attribute values
description:
- Add or remove multiple LDAP attribute values.
notes:
- - This only deals with attributes on existing entries. To add or remove
- whole entries, see M(community.general.ldap_entry).
- - The default authentication settings will attempt to use a SASL EXTERNAL
- bind over a UNIX domain socket. This works well with the default Ubuntu
- install for example, which includes a cn=peercred,cn=external,cn=auth ACL
- rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in O(bind_dn)
- and O(bind_pw).
- - For O(state=present) and O(state=absent), all value comparisons are
- performed on the server for maximum accuracy. For O(state=exact), values
- have to be compared in Python, which obviously ignores LDAP matching
- rules. This should work out in most cases, but it is theoretically
- possible to see spurious changes when target and actual values are
- semantically identical but lexically distinct.
+ - This only deals with attributes on existing entries. To add or remove whole entries, see M(community.general.ldap_entry).
+ - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For
+ O(state=exact), values have to be compared in Python, which obviously ignores LDAP matching rules. This should work out
+ in most cases, but it is theoretically possible to see spurious changes when target and actual values are semantically
+ identical but lexically distinct.
version_added: '0.2.0'
author:
- Jiri Tyr (@jtyr)
@@ -53,46 +43,38 @@ options:
choices: [present, absent, exact]
default: present
description:
- - The state of the attribute values. If V(present), all given attribute
- values will be added if they're missing. If V(absent), all given
- attribute values will be removed if present. If V(exact), the set of
- attribute values will be forced to exactly those provided and no others.
- If O(state=exact) and the attribute value is empty, all values for
- this attribute will be removed.
+ - The state of the attribute values. If V(present), all given attribute values are added if they are missing. If V(absent),
+ all given attribute values are removed if present. If V(exact), the set of attribute values is forced to exactly those
+ provided and no others. If O(state=exact) and the attribute value is empty, all values for this attribute are removed.
attributes:
required: true
type: dict
description:
- The attribute(s) and value(s) to add or remove.
- - Each attribute value can be a string for single-valued attributes or
- a list of strings for multi-valued attributes.
- - If you specify values for this option in YAML, please note that you can improve
- readability for long string values by using YAML block modifiers as seen in the
- examples for this module.
- - Note that when using values that YAML/ansible-core interprets as other types,
- like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if
- these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+ - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes.
+ - If you specify values for this option in YAML, please note that you can improve readability for long string values
+ by using YAML block modifiers as seen in the examples for this module.
+ - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+ (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
ordered:
required: false
type: bool
default: false
description:
- - If V(true), prepend list values with X-ORDERED index numbers in all
- attributes specified in the current task. This is useful mostly with
- C(olcAccess) attribute to easily manage LDAP Access Control Lists.
+ - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This
+ is useful mostly with C(olcAccess) attribute to easily manage LDAP Access Control Lists.
extends_documentation_fragment:
- community.general.ldap.documentation
- community.general.attributes
-
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Configure directory number 1 for example.com
community.general.ldap_attrs:
dn: olcDatabase={1}hdb,cn=config
attributes:
- olcSuffix: dc=example,dc=com
+ olcSuffix: dc=example,dc=com
state: exact
# The complex argument format is required here to pass a list of ACL strings.
@@ -100,17 +82,17 @@ EXAMPLES = r'''
community.general.ldap_attrs:
dn: olcDatabase={1}hdb,cn=config
attributes:
- olcAccess:
- - >-
- {0}to attrs=userPassword,shadowLastChange
- by self write
- by anonymous auth
- by dn="cn=admin,dc=example,dc=com" write
- by * none'
- - >-
- {1}to dn.base="dc=example,dc=com"
- by dn="cn=admin,dc=example,dc=com" write
- by * read
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none'
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
state: exact
# An alternative approach with automatic X-ORDERED numbering
@@ -118,17 +100,17 @@ EXAMPLES = r'''
community.general.ldap_attrs:
dn: olcDatabase={1}hdb,cn=config
attributes:
- olcAccess:
- - >-
- to attrs=userPassword,shadowLastChange
- by self write
- by anonymous auth
- by dn="cn=admin,dc=example,dc=com" write
- by * none'
- - >-
- to dn.base="dc=example,dc=com"
- by dn="cn=admin,dc=example,dc=com" write
- by * read
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none'
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
ordered: true
state: exact
@@ -136,23 +118,23 @@ EXAMPLES = r'''
community.general.ldap_attrs:
dn: olcDatabase={1}hdb,cn=config
attributes:
- olcDbIndex:
- - objectClass eq
- - uid eq
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
- name: Set up a root user, which we can use later to bootstrap the directory
community.general.ldap_attrs:
dn: olcDatabase={1}hdb,cn=config
attributes:
- olcRootDN: cn=root,dc=example,dc=com
- olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
state: exact
- name: Remove an attribute with a specific value
community.general.ldap_attrs:
dn: uid=jdoe,ou=people,dc=example,dc=com
attributes:
- description: "An example user account"
+ description: "An example user account"
state: absent
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
@@ -162,22 +144,22 @@ EXAMPLES = r'''
community.general.ldap_attrs:
dn: uid=jdoe,ou=people,dc=example,dc=com
attributes:
- description: []
+ description: []
state: exact
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
-'''
+"""
-RETURN = r'''
+RETURN = r"""
modlist:
- description: list of modified parameters
+ description: List of modified parameters.
returned: success
type: list
sample:
- [2, "olcRootDN", ["cn=root,dc=example,dc=com"]]
-'''
+"""
import traceback
@@ -314,7 +296,7 @@ def main():
module = AnsibleModule(
argument_spec=gen_specs(
attributes=dict(type='dict', required=True),
- ordered=dict(type='bool', default=False, required=False),
+ ordered=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
),
supports_check_mode=True,
diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py
index 5deaf7c4c4..230f6337ab 100644
--- a/plugins/modules/ldap_entry.py
+++ b/plugins/modules/ldap_entry.py
@@ -11,21 +11,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ldap_entry
short_description: Add or remove LDAP entries
description:
- - Add or remove LDAP entries. This module only asserts the existence or
- non-existence of an LDAP entry, not its attributes. To assert the
- attribute values of an entry, see M(community.general.ldap_attrs).
-notes:
- - The default authentication settings will attempt to use a SASL EXTERNAL
- bind over a UNIX domain socket. This works well with the default Ubuntu
- install for example, which includes a cn=peercred,cn=external,cn=auth ACL
- rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in O(bind_dn)
- and O(bind_pw).
+ - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes.
+ To assert the attribute values of an entry, see M(community.general.ldap_attrs).
author:
- Jiri Tyr (@jtyr)
requirements:
@@ -38,24 +29,19 @@ attributes:
options:
attributes:
description:
- - If O(state=present), attributes necessary to create an entry. Existing
- entries are never modified. To assert specific attribute values on an
- existing entry, use M(community.general.ldap_attrs) module instead.
- - Each attribute value can be a string for single-valued attributes or
- a list of strings for multi-valued attributes.
- - If you specify values for this option in YAML, please note that you can improve
- readability for long string values by using YAML block modifiers as seen in the
- examples for this module.
- - Note that when using values that YAML/ansible-core interprets as other types,
- like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if
- these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+ - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific
+ attribute values on an existing entry, use M(community.general.ldap_attrs) module instead.
+ - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes.
+ - If you specify values for this option in YAML, please note that you can improve readability for long string values
+ by using YAML block modifiers as seen in the examples for this module.
+ - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10)
+ (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
type: dict
default: {}
objectClass:
description:
- - If O(state=present), value or list of values to use when creating
- the entry. It can either be a string or an actual list of
- strings.
+ - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual
+ list of strings.
type: list
elements: str
state:
@@ -66,19 +52,17 @@ options:
type: str
recursive:
description:
- - If O(state=delete), a flag indicating whether a single entry or the
- whole branch must be deleted.
+ - If O(state=delete), a flag indicating whether a single entry or the whole branch must be deleted.
type: bool
default: false
version_added: 4.6.0
extends_documentation_fragment:
- community.general.ldap.documentation
- community.general.attributes
-
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Make sure we have a parent entry for users
community.general.ldap_entry:
dn: ou=users,dc=example,dc=com
@@ -103,19 +87,19 @@ EXAMPLES = """
attributes:
description: An LDAP Administrator
roleOccupant:
- - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com
- - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com
+ - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com
+ - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com
olcAccess:
- - >-
- {0}to attrs=userPassword,shadowLastChange
- by self write
- by anonymous auth
- by dn="cn=admin,dc=example,dc=com" write
- by * none'
- - >-
- {1}to dn.base="dc=example,dc=com"
- by dn="cn=admin,dc=example,dc=com" write
- by * read
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none'
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
- name: Get rid of an old entry
community.general.ldap_entry:
@@ -143,7 +127,7 @@ EXAMPLES = """
"""
-RETURN = """
+RETURN = r"""
# Default return values
"""
diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py
new file mode 100644
index 0000000000..224027f666
--- /dev/null
+++ b/plugins/modules/ldap_inc.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Philippe Duveau
+# Copyright (c) 2019, Maciej Delmanowski (ldap_attrs.py)
+# Copyright (c) 2017, Alexander Korinek (ldap_attrs.py)
+# Copyright (c) 2016, Peter Sagerson (ldap_attrs.py)
+# Copyright (c) 2016, Jiri Tyr (ldap_attrs.py)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The code of this module is derived from that of ldap_attrs.py
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: ldap_inc
+short_description: Use the Modify-Increment LDAP V3 feature to increment an attribute value
+version_added: 10.2.0
+description:
+ - Atomically increments the value of an attribute and returns its new value.
+notes:
+ - When implemented by the directory server, the module uses the ModifyIncrement extension defined in L(RFC4525, https://www.rfc-editor.org/rfc/rfc4525.html)
+ and the control PostRead. This extension and the control are implemented in OpenLdap but not all directory servers implement
+ them. In this case, the module automatically uses a more classic method based on two phases: first the current value is
+ read, then the modify operation removes the old value and adds the new one in a single request. If the value has changed
+ by a concurrent call, then the remove action fails. Then the sequence is retried 3 times before raising an error to the
+ playbook. In a heavy modification environment, the module does not guarantee to be systematically successful.
+ - This only deals with an integer attribute of an existing entry. To modify attributes of an entry, see M(community.general.ldap_attrs)
+ or to add or remove whole entries, see M(community.general.ldap_entry).
+author:
+ - Philippe Duveau (@pduveau)
+requirements:
+ - python-ldap
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The DN entry containing the attribute to increment.
+ attribute:
+ required: true
+ type: str
+ description:
+ - The attribute to increment.
+ increment:
+ required: false
+ type: int
+ default: 1
+ description:
+ - The value of the increment to apply.
+ method:
+ required: false
+ type: str
+ default: auto
+ choices: [auto, rfc4525, legacy]
+ description:
+ - If V(auto), the module determines automatically the method to use.
+ - If V(rfc4525) or V(legacy) force to use the corresponding method.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+ - community.general.attributes
+"""
+
+
+EXAMPLES = r"""
+- name: Increments uidNumber 1 Number for example.com
+ community.general.ldap_inc:
+ dn: "cn=uidNext,ou=unix-management,dc=example,dc=com"
+ attribute: "uidNumber"
+ increment: "1"
+ register: ldap_uidNumber_sequence
+
+- name: Modifies the user to define its identification number (uidNumber) when incrementation is successful
+ community.general.ldap_attrs:
+ dn: "cn=john,ou=posix-users,dc=example,dc=com"
+ state: present
+ attributes:
+ uidNumber: "{{ ldap_uidNumber_sequence.value }}"
+ when: ldap_uidNumber_sequence.incremented
+"""
+
+
+RETURN = r"""
+incremented:
+ description:
+ - It is set to V(true) if the attribute value has changed.
+ returned: success
+ type: bool
+ sample: true
+
+attribute:
+ description:
+ - The name of the attribute that was incremented.
+ returned: success
+ type: str
+ sample: uidNumber
+
+value:
+ description:
+ - The new value after incrementing.
+ returned: success
+ type: str
+ sample: "2"
+
+rfc4525:
+ description:
+ - Is V(true) if the method used to increment is based on RFC4525, V(false) if legacy.
+ returned: success
+ type: bool
+ sample: true
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
+
+with deps.declare("ldap", reason=missing_required_lib('python-ldap')):
+ import ldap
+ import ldap.controls.readentry
+
+
+class LdapInc(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+ # Shortcuts
+ self.attr = self.module.params['attribute']
+ self.increment = self.module.params['increment']
+ self.method = self.module.params['method']
+
+ def inc_rfc4525(self):
+ return [(ldap.MOD_INCREMENT, self.attr, [to_bytes(str(self.increment))])]
+
+ def inc_legacy(self, curr_val, new_val):
+ return [(ldap.MOD_DELETE, self.attr, [to_bytes(curr_val)]),
+ (ldap.MOD_ADD, self.attr, [to_bytes(new_val)])]
+
+ def serverControls(self):
+ return [ldap.controls.readentry.PostReadControl(attrList=[self.attr])]
+
+ LDAP_MOD_INCREMENT = to_bytes("1.3.6.1.1.14")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attribute=dict(type='str', required=True),
+ increment=dict(type='int', default=1),
+ method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']),
+ ),
+ supports_check_mode=True,
+ required_together=ldap_required_together(),
+ )
+
+ deps.validate(module)
+
+ # Instantiate the LdapAttr object
+ mod = LdapInc(module)
+
+ changed = False
+ ret = ""
+ rfc4525 = False
+
+ try:
+ if mod.increment != 0 and not module.check_mode:
+ changed = True
+
+ if mod.method != "auto":
+ rfc4525 = mod.method == "rfc4525"
+ else:
+ rootDSE = mod.connection.search_ext_s(
+ base="",
+ scope=ldap.SCOPE_BASE,
+ attrlist=["*", "+"])
+ if len(rootDSE) == 1:
+ if to_bytes(ldap.CONTROL_POST_READ) in rootDSE[0][1]["supportedControl"] and (
+ mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedFeatures"] or
+ mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedExtension"]
+ ):
+ rfc4525 = True
+
+ if rfc4525:
+ dummy, dummy, dummy, resp_ctrls = mod.connection.modify_ext_s(
+ dn=mod.dn,
+ modlist=mod.inc_rfc4525(),
+ serverctrls=mod.serverControls(),
+ clientctrls=None)
+ if len(resp_ctrls) == 1:
+ ret = resp_ctrls[0].entry[mod.attr][0]
+
+ else:
+ tries = 0
+ max_tries = 3
+ while tries < max_tries:
+ tries = tries + 1
+ result = mod.connection.search_ext_s(
+ base=mod.dn,
+ scope=ldap.SCOPE_BASE,
+ filterstr="(%s=*)" % mod.attr,
+ attrlist=[mod.attr])
+ if len(result) != 1:
+ module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+ return
+ try:
+ ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+ # if the current value first arg in inc_legacy has changed then the modify will fail
+ mod.connection.modify_s(
+ dn=mod.dn,
+ modlist=mod.inc_legacy(result[0][1][mod.attr][0], ret))
+ break
+ except ldap.NO_SUCH_ATTRIBUTE:
+ if tries == max_tries:
+ module.fail_json(msg="The increment could not be applied after " + str(max_tries) + " tries.")
+ return
+
+ else:
+ result = mod.connection.search_ext_s(
+ base=mod.dn,
+ scope=ldap.SCOPE_BASE,
+ filterstr="(%s=*)" % mod.attr,
+ attrlist=[mod.attr])
+ if len(result) == 1:
+ ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+ changed = mod.increment != 0
+ else:
+ module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, incremented=changed, attribute=mod.attr, value=ret, rfc4525=rfc4525)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py
index 5044586b0f..b29254f8c6 100644
--- a/plugins/modules/ldap_passwd.py
+++ b/plugins/modules/ldap_passwd.py
@@ -9,21 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ldap_passwd
short_description: Set passwords in LDAP
description:
- - Set a password for an LDAP entry. This module only asserts that
- a given password is valid for a given entry. To assert the
- existence of an entry, see M(community.general.ldap_entry).
-notes:
- - The default authentication settings will attempt to use a SASL EXTERNAL
- bind over a UNIX domain socket. This works well with the default Ubuntu
- install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
- rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in O(bind_dn)
- and O(bind_pw).
+ - Set a password for an LDAP entry. This module only asserts that a given password is valid for a given entry. To assert
+ the existence of an entry, see M(community.general.ldap_entry).
author:
- Keller Fuchs (@KellerFuchs)
requirements:
@@ -41,10 +32,9 @@ options:
extends_documentation_fragment:
- community.general.ldap.documentation
- community.general.attributes
+"""
-'''
-
-EXAMPLES = """
+EXAMPLES = r"""
- name: Set a password for the admin user
community.general.ldap_passwd:
dn: cn=admin,dc=example,dc=com
@@ -56,13 +46,13 @@ EXAMPLES = """
passwd: "{{ item.value }}"
with_dict:
alice: alice123123
- bob: "|30b!"
+ bob: "|30b!"
admin: "{{ vault_secret }}"
"""
-RETURN = """
+RETURN = r"""
modlist:
- description: list of modified parameters
+ description: List of modified parameters.
returned: success
type: list
sample:
diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py
index 7958f86e0b..47c4d8d64d 100644
--- a/plugins/modules/ldap_search.py
+++ b/plugins/modules/ldap_search.py
@@ -10,19 +10,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
----
module: ldap_search
version_added: '0.2.0'
short_description: Search for entries in a LDAP server
description:
- Return the results of an LDAP search.
-notes:
- - The default authentication settings will attempt to use a SASL EXTERNAL
- bind over a UNIX domain socket. This works well with the default Ubuntu
- install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
- rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in O(bind_dn)
- and O(bind_pw).
author:
- Sebastian Pfahl (@eryx12o45)
requirements:
@@ -55,30 +47,27 @@ options:
type: list
elements: str
description:
- - A list of attributes for limiting the result. Use an
- actual list or a comma-separated string.
+ - A list of attributes for limiting the result. Use an actual list or a comma-separated string.
schema:
default: false
type: bool
description:
- - Set to V(true) to return the full attribute schema of entries, not
- their attribute values. Overrides O(attrs) when provided.
+ - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when
+ provided.
page_size:
default: 0
type: int
description:
- - The page size when performing a simple paged result search (RFC 2696).
- This setting can be tuned to reduce issues with timeouts and server limits.
+ - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues
+ with timeouts and server limits.
- Setting the page size to V(0) (default) disables paged searching.
version_added: 7.1.0
base64_attributes:
description:
- - If provided, all attribute values returned that are listed in this option
- will be Base64 encoded.
- - If the special value V(*) appears in this list, all attributes will be
- Base64 encoded.
- - All other attribute values will be converted to UTF-8 strings. If they
- contain binary data, please note that invalid UTF-8 bytes will be omitted.
+ - If provided, all attribute values returned that are listed in this option are Base64 encoded.
+ - If the special value V(*) appears in this list, all attributes are Base64 encoded.
+ - All other attribute values are converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8
+ bytes are omitted.
type: list
elements: str
version_added: 7.0.0
@@ -102,17 +91,16 @@ EXAMPLES = r"""
register: ldap_group_gids
"""
-RESULTS = """
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
+RESULTS = r"""
results:
description:
- - For every entry found, one dictionary will be returned.
+ - For every entry found, one dictionary is returned.
- Every dictionary contains a key C(dn) with the entry's DN as a value.
- - Every attribute of the entry found is added to the dictionary. If the key
- has precisely one value, that value is taken directly, otherwise the key's
- value is a list.
- - Note that all values (for single-element lists) and list elements (for multi-valued
- lists) will be UTF-8 strings. Some might contain Base64-encoded binary data; which
- ones is determined by the O(base64_attributes) option.
+ - Every attribute of the entry found is added to the dictionary. If the key has precisely one value, that value is taken
+ directly, otherwise the key's value is a list.
+ - Note that all values (for single-element lists) and list elements (for multi-valued lists) are UTF-8 strings. Some might
+ contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option.
type: list
elements: dict
"""
diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py
index ebfb751546..1087cb426c 100644
--- a/plugins/modules/librato_annotation.py
+++ b/plugins/modules/librato_annotation.py
@@ -9,74 +9,76 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: librato_annotation
-short_description: Create an annotation in librato
+short_description: Create an annotation in Librato
description:
- - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
+ - Create an annotation event on the given annotation stream O(name). If the annotation stream does not exist, it creates
+ one automatically.
author: "Seth Edwards (@Sedward)"
requirements: []
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- user:
- type: str
- description:
- - Librato account username
- required: true
- api_key:
- type: str
- description:
- - Librato account api key
- required: true
- name:
- type: str
- description:
- - The annotation stream name
- - If the annotation stream does not exist, it will be created automatically
- required: false
- title:
- type: str
- description:
- - The title of an annotation is a string and may contain spaces
- - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
- required: true
- source:
- type: str
- description:
- - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
- required: false
+ user:
+ type: str
description:
- type: str
- description:
- - The description contains extra metadata about a particular annotation
- - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
- required: false
- start_time:
- type: int
- description:
- - The unix timestamp indicating the time at which the event referenced by this annotation started
- required: false
- end_time:
- type: int
- description:
- - The unix timestamp indicating the time at which the event referenced by this annotation ended
- - For events that have a duration, this is a useful way to annotate the duration of the event
- required: false
- links:
- type: list
- elements: dict
- description:
- - See examples
-'''
+ - Librato account username.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account API key.
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name.
+ - If the annotation stream does not exist, it creates one automatically.
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces.
+ - The title should be a short, high-level summary of the annotation, for example V(v45 Deployment).
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members
+ of a population.
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation.
+ - The description should contain specifics on the individual annotation, for example V(Deployed 9b562b2 shipped new feature
+ foo!).
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started.
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended.
+ - For events that have a duration, this is a useful way to annotate the duration of the event.
+ required: false
+ links:
+ type: list
+ elements: dict
+ description:
+ - See examples.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a simple annotation event with a source
community.general.librato_annotation:
user: user@example.com
@@ -105,7 +107,7 @@ EXAMPLES = '''
description: This is a detailed description of maintenance
start_time: 1395940006
end_time: 1395954406
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -158,12 +160,12 @@ def main():
argument_spec=dict(
user=dict(required=True),
api_key=dict(required=True, no_log=True),
- name=dict(required=False),
+ name=dict(),
title=dict(required=True),
- source=dict(required=False),
- description=dict(required=False),
- start_time=dict(required=False, default=None, type='int'),
- end_time=dict(required=False, default=None, type='int'),
+ source=dict(),
+ description=dict(),
+ start_time=dict(type='int'),
+ end_time=dict(type='int'),
links=dict(type='list', elements='dict')
)
)
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
index 9b0dabdff2..d2c5714d47 100644
--- a/plugins/modules/linode.py
+++ b/plugins/modules/linode.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: linode
short_description: Manage instances on the Linode Public Cloud
description:
@@ -24,34 +23,33 @@ attributes:
options:
state:
description:
- - Indicate desired state of the resource
- choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ - Indicate desired state of the resource.
+ choices: [absent, active, deleted, present, restarted, started, stopped]
default: present
type: str
api_key:
description:
- - Linode API key.
- - E(LINODE_API_KEY) environment variable can be used instead.
+ - Linode API key.
+ - E(LINODE_API_KEY) environment variable can be used instead.
type: str
required: true
name:
description:
- - Name to give the instance (alphanumeric, dashes, underscore).
- - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+ - Name to give the instance (alphanumeric, dashes, underscore).
+ - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
required: true
type: str
displaygroup:
description:
- - Add the instance to a Display Group in Linode Manager.
+ - Add the instance to a Display Group in Linode Manager.
type: str
default: ''
linode_id:
description:
- - Unique ID of a linode server. This value is read-only in the sense that
- if you specify it on creation of a Linode it will not be used. The
- Linode API generates these IDs and we can those generated value here to
- reference a Linode more specifically. This is useful for idempotence.
- aliases: [ lid ]
+ - Unique ID of a Linode server. This value is read-only in the sense that if you specify it on creation of a Linode
+ it is not used. The Linode API generates these IDs and we can use those generated values here to reference a Linode more
+ specifically. This is useful for idempotency.
+ aliases: [lid]
type: int
additional_disks:
description:
@@ -61,119 +59,118 @@ options:
elements: dict
alert_bwin_enabled:
description:
- - Set status of bandwidth in alerts.
+ - Set status of bandwidth in alerts.
type: bool
alert_bwin_threshold:
description:
- - Set threshold in MB of bandwidth in alerts.
+ - Set threshold in MB of bandwidth in alerts.
type: int
alert_bwout_enabled:
description:
- - Set status of bandwidth out alerts.
+ - Set status of bandwidth out alerts.
type: bool
alert_bwout_threshold:
description:
- - Set threshold in MB of bandwidth out alerts.
+ - Set threshold in MB of bandwidth out alerts.
type: int
alert_bwquota_enabled:
description:
- - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
type: bool
alert_bwquota_threshold:
description:
- - Set threshold in MB of bandwidth quota alerts.
+ - Set threshold in MB of bandwidth quota alerts.
type: int
alert_cpu_enabled:
description:
- - Set status of receiving CPU usage alerts.
+ - Set status of receiving CPU usage alerts.
type: bool
alert_cpu_threshold:
description:
- - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
type: int
alert_diskio_enabled:
description:
- - Set status of receiving disk IO alerts.
+ - Set status of receiving disk IO alerts.
type: bool
alert_diskio_threshold:
description:
- - Set threshold for average IO ops/sec over 2 hour period.
+ - Set threshold for average IO ops/sec over 2 hour period.
type: int
backupweeklyday:
description:
- - Day of the week to take backups.
+ - Day of the week to take backups.
type: int
backupwindow:
description:
- - The time window in which backups will be taken.
+ - The time window in which backups are taken.
type: int
plan:
description:
- - plan to use for the instance (Linode plan)
+ - Plan to use for the instance (Linode plan).
type: int
payment_term:
description:
- - payment term to use for the instance (payment term in months)
+ - Payment term to use for the instance (payment term in months).
default: 1
- choices: [ 1, 12, 24 ]
+ choices: [1, 12, 24]
type: int
password:
description:
- - root password to apply to a new server (auto generated if missing)
+ - Root password to apply to a new server (auto generated if missing).
type: str
private_ip:
description:
- - Add private IPv4 address when Linode is created.
- - Default is V(false).
+ - Add private IPv4 address when Linode is created.
+ - Default is V(false).
type: bool
ssh_pub_key:
description:
- - SSH public key applied to root user
+ - SSH public key applied to root user.
type: str
swap:
description:
- - swap size in MB
+ - Swap size in MB.
default: 512
type: int
distribution:
description:
- - distribution to use for the instance (Linode Distribution)
+ - Distribution to use for the instance (Linode Distribution).
type: int
datacenter:
description:
- - datacenter to create an instance in (Linode Datacenter)
+ - Datacenter to create an instance in (Linode Datacenter).
type: int
kernel_id:
description:
- - kernel to use for the instance (Linode Kernel)
+ - Kernel to use for the instance (Linode Kernel).
type: int
wait:
description:
- - wait for the instance to be in state V(running) before returning
+ - Wait for the instance to be in state V(running) before returning.
type: bool
default: true
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
default: 300
type: int
watchdog:
description:
- - Set status of Lassie watchdog.
+ - Set status of Lassie watchdog.
type: bool
default: true
requirements:
- - linode-python
+ - linode-python
author:
-- Vincent Viallet (@zbal)
+ - Vincent Viallet (@zbal)
notes:
- Please note, linode-python does not have python 3 support.
- This module uses the now deprecated v3 of the Linode API.
- Please review U(https://www.linode.com/api/linode) for determining the required parameters.
-'''
-
-EXAMPLES = '''
+"""
+EXAMPLES = r"""
- name: Create a new Linode
community.general.linode:
name: linode-test1
@@ -185,97 +182,97 @@ EXAMPLES = '''
- name: Create a server with a private IP Address
community.general.linode:
- module: linode
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 1
- datacenter: 2
- distribution: 99
- password: 'superSecureRootPassword'
- private_ip: true
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: true
- wait_timeout: 600
- state: present
+ module: linode
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: true
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
delegate_to: localhost
register: linode_creation
- name: Fully configure new server
community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 4
- datacenter: 2
- distribution: 99
- kernel_id: 138
- password: 'superSecureRootPassword'
- private_ip: true
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: true
- wait_timeout: 600
- state: present
- alert_bwquota_enabled: true
- alert_bwquota_threshold: 80
- alert_bwin_enabled: true
- alert_bwin_threshold: 10
- alert_cpu_enabled: true
- alert_cpu_threshold: 210
- alert_bwout_enabled: true
- alert_bwout_threshold: 10
- alert_diskio_enabled: true
- alert_diskio_threshold: 10000
- backupweeklyday: 1
- backupwindow: 2
- displaygroup: 'test'
- additional_disks:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: true
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: true
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: true
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: true
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: true
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: true
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
- {Label: 'disk1', Size: 2500, Type: 'raw'}
- {Label: 'newdisk', Size: 2000}
- watchdog: true
+ watchdog: true
delegate_to: localhost
register: linode_creation
- name: Ensure a running server (create if missing)
community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 1
- datacenter: 2
- distribution: 99
- password: 'superSecureRootPassword'
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: true
- wait_timeout: 600
- state: present
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
delegate_to: localhost
register: linode_creation
- name: Delete a server
community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: absent
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
delegate_to: localhost
- name: Stop a server
community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: stopped
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
delegate_to: localhost
- name: Reboot a server
community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: restarted
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
delegate_to: localhost
-'''
+"""
import time
import traceback
diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py
index da885f3a5f..0095cb9002 100644
--- a/plugins/modules/linode_v4.py
+++ b/plugins/modules/linode_v4.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: linode_v4
short_description: Manage instances on the Linode cloud
description: Manage instances on the Linode cloud.
@@ -18,9 +17,8 @@ requirements:
author:
- Luke Murphy (@decentral1se)
notes:
- - No Linode resizing is currently implemented. This module will, in time,
- replace the current Linode module which uses deprecated API bindings on the
- Linode side.
+ - No Linode resizing is currently implemented. This module aims to replace the current Linode module which uses deprecated
+ API bindings on the Linode side.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -31,52 +29,44 @@ attributes:
options:
region:
description:
- - The region of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/regions/).
+ - The region of the instance. This is a required parameter only when creating Linode instances. See U(https://www.linode.com/docs/api/regions/).
type: str
image:
description:
- - The image of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/images/).
+ - The image of the instance. This is a required parameter only when creating Linode instances.
+ - See U(https://www.linode.com/docs/api/images/).
type: str
type:
description:
- - The type of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/linode-types/).
+ - The type of the instance. This is a required parameter only when creating Linode instances.
+ - See U(https://www.linode.com/docs/api/linode-types/).
type: str
label:
description:
- - The instance label. This label is used as the main determiner for
- idempotence for the module and is therefore mandatory.
+ - The instance label. This label is used as the main determiner for idempotency for the module and is therefore mandatory.
type: str
required: true
group:
description:
- - The group that the instance should be marked under. Please note, that
- group labelling is deprecated but still supported. The encouraged
- method for marking instances is to use tags.
+ - The group that the instance should be marked under. Please note, that group labelling is deprecated but still supported.
+ The encouraged method for marking instances is to use tags.
type: str
private_ip:
description:
- - If V(true), the created Linode will have private networking enabled and
- assigned a private IPv4 address.
+ - If V(true), the created Linode instance has private networking enabled and assigned a private IPv4 address.
type: bool
default: false
version_added: 3.0.0
tags:
description:
- - The tags that the instance should be marked under. See
- U(https://www.linode.com/docs/api/tags/).
+ - The tags that the instance should be marked under.
+ - See U(https://www.linode.com/docs/api/tags/).
type: list
elements: str
root_pass:
description:
- - The password for the root user. If not specified, one will be
- generated. This generated password will be available in the task
- success JSON.
+ - The password for the root user. If not specified, it generates a new one. This generated password is available in
+ the task success JSON.
type: str
authorized_keys:
description:
@@ -88,33 +78,31 @@ options:
- The desired instance state.
type: str
choices:
- - present
- - absent
+ - present
+ - absent
required: true
access_token:
description:
- - The Linode API v4 access token. It may also be specified by exposing
- the E(LINODE_ACCESS_TOKEN) environment variable. See
- U(https://www.linode.com/docs/api#access-and-authentication).
+ - The Linode API v4 access token. It may also be specified by exposing the E(LINODE_ACCESS_TOKEN) environment variable.
+ - See U(https://www.linode.com/docs/api#access-and-authentication).
required: true
type: str
stackscript_id:
description:
- The numeric ID of the StackScript to use when creating the instance.
- See U(https://www.linode.com/docs/api/stackscripts/).
+ - See U(https://www.linode.com/docs/api/stackscripts/).
type: int
version_added: 1.3.0
stackscript_data:
description:
- - An object containing arguments to any User Defined Fields present in
- the StackScript used when creating the instance.
- Only valid when a stackscript_id is provided.
- See U(https://www.linode.com/docs/api/stackscripts/).
+ - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance.
+ Only valid when a O(stackscript_id) is provided.
+ - See U(https://www.linode.com/docs/api/stackscripts/).
type: dict
version_added: 1.3.0
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a new Linode.
community.general.linode_v4:
label: new-linode
@@ -135,50 +123,51 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
instance:
description: The instance description in JSON serialized form.
returned: Always.
type: dict
- sample: {
- "root_pass": "foobar", # if auto-generated
- "alerts": {
- "cpu": 90,
- "io": 10000,
- "network_in": 10,
- "network_out": 10,
- "transfer_quota": 80
- },
- "backups": {
- "enabled": false,
- "schedule": {
- "day": null,
- "window": null
- }
- },
- "created": "2018-09-26T08:12:33",
- "group": "Foobar Group",
- "hypervisor": "kvm",
- "id": 10480444,
- "image": "linode/centos7",
- "ipv4": [
- "130.132.285.233"
- ],
- "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
- "label": "lin-foo",
- "region": "eu-west",
- "specs": {
- "disk": 25600,
- "memory": 1024,
- "transfer": 1000,
- "vcpus": 1
- },
- "status": "running",
- "tags": [],
- "type": "g6-nanode-1",
- "updated": "2018-09-26T10:10:14",
- "watchdog_enabled": true
- }
+ sample:
+ {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
"""
import traceback
diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py
index 08030a8b37..a33c78be3c 100644
--- a/plugins/modules/listen_ports_facts.py
+++ b/plugins/modules/listen_ports_facts.py
@@ -8,21 +8,19 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: listen_ports_facts
author:
- - Nathan Davison (@ndavison)
+ - Nathan Davison (@ndavison)
description:
- - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands.
- - This module currently supports Linux only.
+ - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands.
+ - This module currently supports Linux only.
requirements:
- netstat or ss
short_description: Gather facts on processes listening on TCP and UDP ports
notes:
- - |
- C(ss) returns all processes for each listen address and port.
- This plugin will return each of them, so multiple entries for the same listen address and port are likely in results.
+ - C(ss) returns all processes for each listen address and port.
+ - This plugin returns each of them, so multiple entries for the same listen address and port are likely in results.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.facts
@@ -31,7 +29,7 @@ options:
command:
description:
- Override which command to use for fetching listen ports.
- - 'By default module will use first found supported command on the system (in alphanumerical order).'
+ - By default the module uses the first found supported command on the system (in alphanumerical order).
type: str
choices:
- netstat
@@ -39,15 +37,15 @@ options:
version_added: 4.1.0
include_non_listening:
description:
- - Show both listening and non-listening sockets (for TCP this means established connections).
- - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state),
- RV(ansible_facts.tcp_listen[].foreign_address), and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts.
+ - Show both listening and non-listening sockets (for TCP this means established connections).
+ - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state), RV(ansible_facts.tcp_listen[].foreign_address),
+ and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts.
type: bool
default: false
version_added: 5.4.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Gather facts on listening ports
community.general.listen_ports_facts:
@@ -77,11 +75,11 @@ EXAMPLES = r'''
community.general.listen_ports_facts:
command: 'netstat'
include_non_listening: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
ansible_facts:
- description: Dictionary containing details of TCP and UDP ports with listening servers
+ description: Dictionary containing details of TCP and UDP ports with listening servers.
returned: always
type: complex
contains:
@@ -189,7 +187,7 @@ ansible_facts:
returned: always
type: str
sample: "root"
-'''
+"""
import re
import platform
@@ -399,7 +397,7 @@ def main():
break
if bin_path is None:
- raise EnvironmentError(msg='Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
+ raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
# which ports are listening for connections?
args = commands_map[command]['args']
diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py
index fb608ff138..018d9fc307 100644
--- a/plugins/modules/lldp.py
+++ b/plugins/modules/lldp.py
@@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: lldp
-requirements: [ lldpctl ]
-short_description: Get details reported by lldp
+requirements: [lldpctl]
+short_description: Get details reported by LLDP
description:
- - Reads data out of lldpctl
+ - Reads data out of C(lldpctl).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -23,28 +22,32 @@ attributes:
support: none
diff_mode:
support: none
-options: {}
+options:
+ multivalues:
+ description: If C(lldpctl) outputs an attribute multiple times, represent all values as a list.
+ required: false
+ type: bool
+ default: false
author: "Andy Hill (@andyhky)"
notes:
- - Requires lldpd running and lldp enabled on switches
-'''
+ - Requires C(lldpd) running and LLDP enabled on switches.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Retrieve switch/port information
- - name: Gather information from lldp
- community.general.lldp:
+- name: Gather information from LLDP
+ community.general.lldp:
- - name: Print each switch/port
- ansible.builtin.debug:
+- name: Print each switch/port
+ ansible.builtin.debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
- with_items: "{{ lldp.keys() }}"
+ with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
-
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -55,26 +58,49 @@ def gather_lldp(module):
if output:
output_dict = {}
current_dict = {}
- lldp_entries = output.split("\n")
+ lldp_entries = output.strip().split("\n")
+ final = ""
for entry in lldp_entries:
if entry.startswith('lldp'):
path, value = entry.strip().split("=", 1)
path = path.split(".")
path_components, final = path[:-1], path[-1]
+ elif final in current_dict and isinstance(current_dict[final], str):
+ current_dict[final] += '\n' + entry
+ continue
+ elif final in current_dict and isinstance(current_dict[final], list):
+ current_dict[final][-1] += '\n' + entry
+ continue
else:
- value = current_dict[final] + '\n' + entry
+ continue
current_dict = output_dict
for path_component in path_components:
current_dict[path_component] = current_dict.get(path_component, {})
+ if not isinstance(current_dict[path_component], dict):
+ current_dict[path_component] = {'value': current_dict[path_component]}
current_dict = current_dict[path_component]
- current_dict[final] = value
+
+ if final in current_dict and isinstance(current_dict[final], dict) and module.params['multivalues']:
+ current_dict = current_dict[final]
+ final = 'value'
+
+ if final not in current_dict or not module.params['multivalues']:
+ current_dict[final] = value
+ elif isinstance(current_dict[final], str):
+ current_dict[final] = [current_dict[final], value]
+ elif isinstance(current_dict[final], list):
+ current_dict[final].append(value)
+
return output_dict
def main():
- module = AnsibleModule({})
+ module_args = dict(
+ multivalues=dict(type='bool', default=False)
+ )
+ module = AnsibleModule(module_args)
lldp_output = gather_lldp(module)
try:
diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py
index 8886cdc9cd..2e1932c204 100644
--- a/plugins/modules/locale_gen.py
+++ b/plugins/modules/locale_gen.py
@@ -8,40 +8,48 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: locale_gen
short_description: Creates or removes locales
description:
- - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+ - Manages locales in Debian and Ubuntu systems.
author:
- - Augustus Kling (@AugustusKling)
+ - Augustus Kling (@AugustusKling)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: list
- elements: str
- description:
- - Name and encoding of the locales, such as V(en_GB.UTF-8).
- - Before community.general 9.3.0, this was a string. Using a string still works.
- required: true
- state:
- type: str
- description:
- - Whether the locale shall be present.
- choices: [ absent, present ]
- default: present
+ name:
+ type: list
+ elements: str
+ description:
+ - Name and encoding of the locales, such as V(en_GB.UTF-8).
+ - Before community.general 9.3.0, this was a string. Using a string still works.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locales shall be present.
+ choices: [absent, present]
+ default: present
notes:
- - This module does not support RHEL-based systems.
-'''
+ - If C(/etc/locale.gen) exists, the module assumes it is using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
+ exists it assumes it is using the B(ubuntu_legacy) mechanism, else it raises an error.
+ - When using glibc mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen).
+ - When using ubuntu_legacy mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running
+ C(locale-gen).
+ - Please note that the code path that uses ubuntu_legacy mechanism has not been tested for a while, because Ubuntu is already
+ using the glibc mechanism. There is no support for that, given our inability to test it. Therefore, that mechanism is
+ B(deprecated) and will be removed in community.general 13.0.0.
+ - Currently the module is B(only supported for Debian and Ubuntu) systems.
+ - This module requires the package C(locales) installed in Debian and Ubuntu systems.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure a locale exists
community.general.locale_gen:
name: de_CH.UTF-8
@@ -53,7 +61,19 @@ EXAMPLES = '''
- en_GB.UTF-8
- nl_NL.UTF-8
state: present
-'''
+"""
+
+RETURN = r"""
+mechanism:
+ description: Mechanism used to deploy the locales.
+ type: str
+ choices:
+ - glibc
+ - ubuntu_legacy
+ returned: success
+ sample: glibc
+ version_added: 10.2.0
+"""
import os
import re
@@ -64,22 +84,25 @@ from ansible_collections.community.general.plugins.module_utils.mh.deco import c
from ansible_collections.community.general.plugins.module_utils.locale_gen import locale_runner, locale_gen_runner
-class LocaleGen(StateModuleHelper):
- LOCALE_NORMALIZATION = {
- ".utf8": ".UTF-8",
- ".eucjp": ".EUC-JP",
- ".iso885915": ".ISO-8859-15",
- ".cp1251": ".CP1251",
- ".koi8r": ".KOI8-R",
- ".armscii8": ".ARMSCII-8",
- ".euckr": ".EUC-KR",
- ".gbk": ".GBK",
- ".gb18030": ".GB18030",
- ".euctw": ".EUC-TW",
- }
- LOCALE_GEN = "/etc/locale.gen"
- LOCALE_SUPPORTED = "/var/lib/locales/supported.d/"
+ETC_LOCALE_GEN = "/etc/locale.gen"
+VAR_LIB_LOCALES = "/var/lib/locales/supported.d"
+VAR_LIB_LOCALES_LOCAL = os.path.join(VAR_LIB_LOCALES, "local")
+SUPPORTED_LOCALES = "/usr/share/i18n/SUPPORTED"
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+class LocaleGen(StateModuleHelper):
output_params = ["name"]
module = dict(
argument_spec=dict(
@@ -88,17 +111,37 @@ class LocaleGen(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
- self.vars.set("ubuntu_mode", False)
- if os.path.exists(self.LOCALE_SUPPORTED):
+ self.MECHANISMS = dict(
+ ubuntu_legacy=dict(
+ available=SUPPORTED_LOCALES,
+ apply_change=self.apply_change_ubuntu_legacy,
+ ),
+ glibc=dict(
+ available=SUPPORTED_LOCALES,
+ apply_change=self.apply_change_glibc,
+ ),
+ )
+
+ if os.path.exists(ETC_LOCALE_GEN):
+ self.vars.ubuntu_mode = False
+ self.vars.mechanism = "glibc"
+ elif os.path.exists(VAR_LIB_LOCALES):
self.vars.ubuntu_mode = True
+ self.vars.mechanism = "ubuntu_legacy"
+ self.module.deprecate(
+ "On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed from"
+ " in community.general 13.0.0. If you see this message on a modern Debian or Ubuntu version,"
+ " please create an issue in the community.general repository",
+ version="13.0.0", collection_name="community.general"
+ )
else:
- if not os.path.exists(self.LOCALE_GEN):
- self.do_raise("{0} and {1} are missing. Is the package \"locales\" installed?".format(
- self.LOCALE_SUPPORTED, self.LOCALE_GEN
- ))
+ self.do_raise('{0} and {1} are missing. Is the package "locales" installed?'.format(
+ VAR_LIB_LOCALES, ETC_LOCALE_GEN
+ ))
+
+ self.runner = locale_runner(self.module)
self.assert_available()
self.vars.set("is_present", self.is_present(), output=False)
@@ -116,30 +159,26 @@ class LocaleGen(StateModuleHelper):
checking either :
* if the locale is present in /etc/locales.gen
* or if the locale is present in /usr/share/i18n/SUPPORTED"""
- __regexp = r'^#?\s*(?P<locale>\S+[\._\S]+) (?P<charset>\S+)\s*$'
- if self.vars.ubuntu_mode:
- __locales_available = '/usr/share/i18n/SUPPORTED'
- else:
- __locales_available = '/etc/locale.gen'
+ regexp = r'^\s*#?\s*(?P<locale>\S+[\._\S]+) (?P<charset>\S+)\s*$'
+ locales_available = self.MECHANISMS[self.vars.mechanism]["available"]
- re_compiled = re.compile(__regexp)
- with open(__locales_available, 'r') as fd:
+ re_compiled = re.compile(regexp)
+ with open(locales_available, 'r') as fd:
lines = fd.readlines()
- res = [re_compiled.match(line) for line in lines]
- if self.verbosity >= 4:
- self.vars.available_lines = lines
+ res = [re_compiled.match(line) for line in lines]
+ self.vars.set("available_lines", lines, verbosity=4)
- locales_not_found = []
- for locale in self.vars.name:
- # Check if the locale is not found in any of the matches
- if not any(match and match.group("locale") == locale for match in res):
- locales_not_found.append(locale)
+ locales_not_found = []
+ for locale in self.vars.name:
+ # Check if the locale is not found in any of the matches
+ if not any(match and match.group("locale") == locale for match in res):
+ locales_not_found.append(locale)
# locale may be installed but not listed in the file, for example C.UTF-8 in some systems
locales_not_found = self.locale_get_not_present(locales_not_found)
if locales_not_found:
- self.do_raise("The following locales you've entered are not available on your system: {0}".format(', '.join(locales_not_found)))
+ self.do_raise("The following locales you have entered are not available on your system: {0}".format(', '.join(locales_not_found)))
def is_present(self):
return not self.locale_get_not_present(self.vars.name)
@@ -161,13 +200,13 @@ class LocaleGen(StateModuleHelper):
def fix_case(self, name):
"""locale -a might return the encoding in either lower or upper case.
Passing through this function makes them uniform for comparisons."""
- for s, r in self.LOCALE_NORMALIZATION.items():
+ for s, r in LOCALE_NORMALIZATION.items():
name = name.replace(s, r)
return name
- def set_locale(self, names, enabled=True):
+ def set_locale_glibc(self, names, enabled=True):
""" Sets the state of the locale. Defaults to enabled. """
- with open("/etc/locale.gen", 'r') as fr:
+ with open(ETC_LOCALE_GEN, 'r') as fr:
lines = fr.readlines()
locale_regexes = []
@@ -186,10 +225,10 @@ class LocaleGen(StateModuleHelper):
lines[i] = search.sub(replace, lines[i])
# Write the modified content back to the file
- with open("/etc/locale.gen", 'w') as fw:
+ with open(ETC_LOCALE_GEN, 'w') as fw:
fw.writelines(lines)
- def apply_change(self, targetState, names):
+ def apply_change_glibc(self, targetState, names):
"""Create or remove locale.
Keyword arguments:
@@ -197,13 +236,13 @@ class LocaleGen(StateModuleHelper):
names -- Names list including encoding such as de_CH.UTF-8.
"""
- self.set_locale(names, enabled=(targetState == "present"))
+ self.set_locale_glibc(names, enabled=(targetState == "present"))
runner = locale_gen_runner(self.module)
with runner() as ctx:
ctx.run()
- def apply_change_ubuntu(self, targetState, names):
+ def apply_change_ubuntu_legacy(self, targetState, names):
"""Create or remove locale.
Keyword arguments:
@@ -219,9 +258,9 @@ class LocaleGen(StateModuleHelper):
ctx.run()
else:
# Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
- with open("/var/lib/locales/supported.d/local", "r") as fr:
+ with open(VAR_LIB_LOCALES_LOCAL, "r") as fr:
content = fr.readlines()
- with open("/var/lib/locales/supported.d/local", "w") as fw:
+ with open(VAR_LIB_LOCALES_LOCAL, "w") as fw:
for line in content:
locale, charset = line.split(' ')
if locale not in names:
@@ -235,10 +274,7 @@ class LocaleGen(StateModuleHelper):
def __state_fallback__(self):
if self.vars.state_tracking == self.vars.state:
return
- if self.vars.ubuntu_mode:
- self.apply_change_ubuntu(self.vars.state, self.vars.name)
- else:
- self.apply_change(self.vars.state, self.vars.name)
+ self.MECHANISMS[self.vars.mechanism]["apply_change"](self.vars.state, self.vars.name)
def main():
diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py
index f177cf4546..69e83f5e49 100644
--- a/plugins/modules/logentries.py
+++ b/plugins/modules/logentries.py
@@ -9,49 +9,49 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: logentries
author: "Ivan Vanderbyl (@ivanvanderbyl)"
-short_description: Module for tracking logs via logentries.com
+short_description: Module for tracking logs using U(logentries.com)
description:
- - Sends logs to LogEntries in realtime
+ - Sends logs to LogEntries in realtime.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- path:
- type: str
- description:
- - path to a log file
- required: true
- state:
- type: str
- description:
- - following state of the log
- choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
- required: false
- default: present
- name:
- type: str
- description:
- - name of the log
- required: false
- logtype:
- type: str
- description:
- - type of the log
- required: false
- aliases: [type]
+ path:
+ type: str
+ description:
+ - Path to a log file.
+ required: true
+ state:
+ type: str
+ description:
+ - Following state of the log.
+ choices: ['present', 'absent', 'followed', 'unfollowed']
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - Name of the log.
+ required: false
+ logtype:
+ type: str
+ description:
+ - Type of the log.
+ required: false
+ aliases: [type]
notes:
- - Requires the LogEntries agent which can be installed following the instructions at logentries.com
-'''
-EXAMPLES = '''
+ - Requires the LogEntries agent which can be installed following the instructions at U(logentries.com).
+"""
+
+EXAMPLES = r"""
- name: Track nginx logs
community.general.logentries:
path: /var/log/nginx/access.log
@@ -62,7 +62,7 @@ EXAMPLES = '''
community.general.logentries:
path: /var/log/nginx/error.log
state: absent
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -139,8 +139,8 @@ def main():
argument_spec=dict(
path=dict(required=True),
state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
- name=dict(required=False, default=None, type='str'),
- logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ name=dict(type='str'),
+ logtype=dict(type='str', aliases=['type'])
),
supports_check_mode=True
)
diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py
index 03851ad1f4..8b2a7c5155 100644
--- a/plugins/modules/logentries_msg.py
+++ b/plugins/modules/logentries_msg.py
@@ -9,12 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: logentries_msg
short_description: Send a message to logentries
description:
- - Send a message to logentries
+ - Send a message to logentries.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -36,24 +35,24 @@ options:
api:
type: str
description:
- - API endpoint
+ - API endpoint.
default: data.logentries.com
port:
type: int
description:
- - API endpoint port
+ - API endpoint port.
default: 80
author: "Jimmy Tang (@jcftang) "
-'''
+"""
-RETURN = '''# '''
+RETURN = """#"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send a message to logentries
community.general.logentries_msg:
- token=00000000-0000-0000-0000-000000000000
- msg="{{ ansible_hostname }}"
-'''
+ token: 00000000-0000-0000-0000-000000000000
+ msg: "{{ ansible_hostname }}"
+"""
import socket
diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py
index 7ee118ff28..afacf7767f 100644
--- a/plugins/modules/logstash_plugin.py
+++ b/plugins/modules/logstash_plugin.py
@@ -8,53 +8,51 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: logstash_plugin
short_description: Manage Logstash plugins
description:
- - Manages Logstash plugins.
+ - Manages Logstash plugins.
author: Loic Blot (@nerzhul)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: str
- description:
- - Install plugin with that name.
- required: true
- state:
- type: str
- description:
- - Apply plugin state.
- choices: ["present", "absent"]
- default: present
- plugin_bin:
- type: path
- description:
- - Specify logstash-plugin to use for plugin management.
- default: /usr/share/logstash/bin/logstash-plugin
- proxy_host:
- type: str
- description:
- - Proxy host to use during plugin installation.
- proxy_port:
- type: str
- description:
- - Proxy port to use during plugin installation.
- version:
- type: str
- description:
- - Specify plugin Version of the plugin to install.
- If plugin exists with previous version, it will NOT be updated.
-'''
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: true
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+ - Specify version of the plugin to install. If the plugin exists with a previous version, it is B(not) updated.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install Logstash beats input plugin
community.general.logstash_plugin:
state: present
@@ -77,7 +75,7 @@ EXAMPLES = '''
name: logstash-input-beats
environment:
LS_JAVA_OPTS: "-Xms256m -Xmx256m"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py
index 7ff7e3a2e7..42d4c9182e 100644
--- a/plugins/modules/lvg.py
+++ b/plugins/modules/lvg.py
@@ -9,10 +9,9 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
-- Alexander Bulimov (@abulimov)
+ - Alexander Bulimov (@abulimov)
module: lvg
short_description: Configure LVM volume groups
description:
@@ -27,78 +26,84 @@ attributes:
options:
vg:
description:
- - The name of the volume group.
+ - The name of the volume group.
type: str
required: true
pvs:
description:
- - List of comma-separated devices to use as physical devices in this volume group.
- - Required when creating or resizing volume group.
- - The module will take care of running pvcreate if needed.
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module runs C(pvcreate) if needed.
+ - O(remove_extra_pvs) controls whether or not unspecified physical devices are removed from the volume group.
type: list
elements: str
pesize:
description:
- - "The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector
- (where the sector size is the largest sector size of the PVs currently used in the VG),
- or at least 128KiB."
- - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the
+ largest sector size of the PVs currently used in the VG), or at least 128KiB.
+ - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
type: str
default: "4"
pv_options:
description:
- - Additional options to pass to C(pvcreate) when creating the volume group.
+ - Additional options to pass to C(pvcreate) when creating the volume group.
type: str
default: ''
pvresize:
description:
- - If V(true), resize the physical volume to the maximum available size.
+ - If V(true), resize the physical volume to the maximum available size.
type: bool
default: false
version_added: '0.2.0'
vg_options:
description:
- - Additional options to pass to C(vgcreate) when creating the volume group.
+ - Additional options to pass to C(vgcreate) when creating the volume group.
type: str
default: ''
state:
description:
- - Control if the volume group exists and it's state.
- - The states V(active) and V(inactive) implies V(present) state. Added in 7.1.0
- - "If V(active) or V(inactive), the module manages the VG's logical volumes current state.
- The module also handles the VG's autoactivation state if supported
- unless when creating a volume group and the autoactivation option specified in O(vg_options)."
+ - Control if the volume group exists and its state.
+ - The states V(active) and V(inactive) imply V(present) state. Added in 7.1.0.
+ - If V(active) or V(inactive), the module manages the VG's logical volumes current state. The module also handles the
+ VG's autoactivation state if supported unless when creating a volume group and the autoactivation option specified
+ in O(vg_options).
type: str
- choices: [ absent, present, active, inactive ]
+ choices: [absent, present, active, inactive]
default: present
force:
description:
- - If V(true), allows to remove volume group with logical volumes.
+ - If V(true), allows to remove volume group with logical volumes.
type: bool
default: false
reset_vg_uuid:
description:
- - Whether the volume group's UUID is regenerated.
- - This is B(not idempotent). Specifying this parameter always results in a change.
+ - Whether the volume group's UUID is regenerated.
+ - This is B(not idempotent). Specifying this parameter always results in a change.
type: bool
default: false
version_added: 7.1.0
reset_pv_uuid:
description:
- - Whether the volume group's physical volumes' UUIDs are regenerated.
- - This is B(not idempotent). Specifying this parameter always results in a change.
+ - Whether the volume group's physical volumes' UUIDs are regenerated.
+ - This is B(not idempotent). Specifying this parameter always results in a change.
type: bool
default: false
version_added: 7.1.0
+ remove_extra_pvs:
+ description:
+ - Remove physical volumes from the volume group which are not in O(pvs).
+ type: bool
+ default: true
+ version_added: 10.4.0
seealso:
-- module: community.general.filesystem
-- module: community.general.lvol
-- module: community.general.parted
+ - module: community.general.filesystem
+ - module: community.general.lvol
+ - module: community.general.parted
notes:
- This module does not modify PE size for already present volume group.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
community.general.lvg:
vg: vg.services
@@ -118,7 +123,9 @@ EXAMPLES = r'''
- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
community.general.lvg:
vg: vg.services
- pvs: /dev/sdb1,/dev/sdc5
+ pvs:
+ - /dev/sdb1
+ - /dev/sdc5
- name: Remove a volume group with name vg.services
community.general.lvg:
@@ -141,6 +148,13 @@ EXAMPLES = r'''
state: active
vg: vg.services
+- name: Add new PVs to volume group without removing existing ones
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc1
+ remove_extra_pvs: false
+ state: present
+
- name: Reset a volume group UUID
community.general.lvg:
state: inactive
@@ -151,10 +165,12 @@ EXAMPLES = r'''
community.general.lvg:
state: inactive
vg: vg.services
- pvs: /dev/sdb1,/dev/sdc5
+ pvs:
+ - /dev/sdb1
+ - /dev/sdc5
reset_vg_uuid: true
reset_pv_uuid: true
-'''
+"""
import itertools
import os
@@ -385,6 +401,7 @@ def main():
force=dict(type='bool', default=False),
reset_vg_uuid=dict(type='bool', default=False),
reset_pv_uuid=dict(type='bool', default=False),
+ remove_extra_pvs=dict(type="bool", default=True),
),
required_if=[
['reset_pv_uuid', True, ['pvs']],
@@ -401,6 +418,7 @@ def main():
vgoptions = module.params['vg_options'].split()
reset_vg_uuid = module.boolean(module.params['reset_vg_uuid'])
reset_pv_uuid = module.boolean(module.params['reset_pv_uuid'])
+ remove_extra_pvs = module.boolean(module.params["remove_extra_pvs"])
this_vg = find_vg(module=module, vg=vg)
present_state = state in ['present', 'active', 'inactive']
@@ -496,6 +514,9 @@ def main():
devs_to_remove = list(set(current_devs) - set(dev_list))
devs_to_add = list(set(dev_list) - set(current_devs))
+ if not remove_extra_pvs:
+ devs_to_remove = []
+
if current_devs:
if present_state:
for device in current_devs:
diff --git a/plugins/modules/lvg_rename.py b/plugins/modules/lvg_rename.py
index bd48ffa62f..37f513697e 100644
--- a/plugins/modules/lvg_rename.py
+++ b/plugins/modules/lvg_rename.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- Laszlo Szomor (@lszomor)
module: lvg_rename
@@ -27,23 +26,23 @@ version_added: 7.1.0
options:
vg:
description:
- - The name or UUID of the source VG.
- - See V(vgrename(8\)) for valid values.
+ - The name or UUID of the source VG.
+ - See V(vgrename(8\)) for valid values.
type: str
required: true
vg_new:
description:
- - The new name of the VG.
- - See V(lvm(8\)) for valid names.
+ - The new name of the VG.
+ - See V(lvm(8\)) for valid names.
type: str
required: true
seealso:
-- module: community.general.lvg
+ - module: community.general.lvg
notes:
- This module does not modify VG renaming-related configurations like C(fstab) entries or boot parameters.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Rename a VG by name
community.general.lvg_rename:
vg: vg_orig_name
@@ -53,7 +52,7 @@ EXAMPLES = r'''
community.general.lvg_rename:
vg_uuid: SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj
vg_new: vg_new_name
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py
new file mode 100644
index 0000000000..15740db8c1
--- /dev/null
+++ b/plugins/modules/lvm_pv.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Klention Mali
+# Based on lvol module by Jeroen Hoekx
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: lvm_pv
+short_description: Manage LVM Physical Volumes
+version_added: "11.0.0"
+description:
+ - Creates, resizes or removes LVM Physical Volumes.
+author:
+ - Klention Mali (@klention)
+options:
+ device:
+ description:
+ - Path to the block device to manage.
+ type: path
+ required: true
+ state:
+ description:
+ - Control if the physical volume exists.
+ type: str
+ choices: [present, absent]
+ default: present
+ force:
+ description:
+ - Force the operation.
+ - When O(state=present) (creating a PV), this uses C(pvcreate -f) to force creation.
+ - When O(state=absent) (removing a PV), this uses C(pvremove -ff) to force removal even if part of a volume group.
+ type: bool
+ default: false
+ resize:
+ description:
+ - Resize PV to device size when O(state=present).
+ type: bool
+ default: false
+notes:
+ - Requires LVM2 utilities installed on the target system.
+ - Device path must exist when creating a PV.
+"""
+
+EXAMPLES = r"""
+- name: Creating physical volume on /dev/sdb
+ community.general.lvm_pv:
+ device: /dev/sdb
+
+- name: Creating and resizing (if needed) physical volume
+ community.general.lvm_pv:
+ device: /dev/sdb
+ resize: true
+
+- name: Removing physical volume that is not part of any volume group
+ community.general.lvm_pv:
+ device: /dev/sdb
+ state: absent
+
+- name: Force removing physical volume that is already part of a volume group
+ community.general.lvm_pv:
+ device: /dev/sdb
+ force: true
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_pv_status(module, device):
+ """Check if the device is already a PV."""
+ cmd = ['pvs', '--noheadings', '--readonly', device]
+ return module.run_command(cmd)[0] == 0
+
+
+def get_pv_size(module, device):
+ """Get current PV size in bytes."""
+ cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ return int(out.strip())
+
+
+def rescan_device(module, device):
+ """Perform storage rescan for the device."""
+ # Extract the base device name (e.g., /dev/sdb -> sdb)
+ base_device = os.path.basename(device)
+ rescan_path = "/sys/block/{0}/device/rescan".format(base_device)
+
+ if os.path.exists(rescan_path):
+ try:
+ with open(rescan_path, 'w') as f:
+ f.write('1')
+ return True
+ except IOError as e:
+ module.warn("Failed to rescan device {0}: {1}".format(device, str(e)))
+ return False
+ else:
+ module.warn("Rescan path not found for device {0}".format(device))
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ force=dict(type='bool', default=False),
+ resize=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ device = module.params['device']
+ state = module.params['state']
+ force = module.params['force']
+ resize = module.params['resize']
+ changed = False
+ actions = []
+
+ # Validate device existence for present state
+ if state == 'present' and not os.path.exists(device):
+ module.fail_json(msg="Device %s not found" % device)
+
+ is_pv = get_pv_status(module, device)
+
+ if state == 'present':
+ # Create PV if needed
+ if not is_pv:
+ if module.check_mode:
+ changed = True
+ actions.append('would be created')
+ else:
+ cmd = ['pvcreate']
+ if force:
+ cmd.append('-f')
+ cmd.append(device)
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ changed = True
+ actions.append('created')
+ is_pv = True
+
+ # Handle resizing
+ elif resize and is_pv:
+ if module.check_mode:
+ # In check mode, assume resize would change
+ changed = True
+ actions.append('would be resized')
+ else:
+ # Perform device rescan each time before resizing
+ if rescan_device(module, device):
+ actions.append('rescanned')
+ original_size = get_pv_size(module, device)
+ rc, out, err = module.run_command(['pvresize', device], check_rc=True)
+ new_size = get_pv_size(module, device)
+ if new_size != original_size:
+ changed = True
+ actions.append('resized')
+
+ elif state == 'absent':
+ if is_pv:
+ if module.check_mode:
+ changed = True
+ actions.append('would be removed')
+ else:
+ cmd = ['pvremove', '-y']
+ if force:
+ cmd.append('-ff')
+ changed = True
+ cmd.append(device)
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ actions.append('removed')
+
+ # Generate final message
+ if actions:
+ msg = "PV %s: %s" % (device, ', '.join(actions))
+ else:
+ msg = "No changes needed for PV %s" % device
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py
index 3a2f5c7cdd..6166e437f2 100644
--- a/plugins/modules/lvol.py
+++ b/plugins/modules/lvol.py
@@ -8,13 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author:
- - Jeroen Hoekx (@jhoekx)
- - Alexander Bulimov (@abulimov)
- - Raoul Baudach (@unkaputtbar112)
- - Ziga Kern (@zigaSRC)
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+ - Raoul Baudach (@unkaputtbar112)
+ - Ziga Kern (@zigaSRC)
module: lvol
short_description: Configure LVM logical volumes
description:
@@ -31,75 +30,75 @@ options:
type: str
required: true
description:
- - The volume group this logical volume is part of.
+ - The volume group this logical volume is part of.
lv:
type: str
description:
- - The name of the logical volume.
+ - The name of the logical volume.
size:
type: str
description:
- - The size of the logical volume, according to lvcreate(8) --size, by
- default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
- according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN];
- Float values must begin with a digit.
- - When resizing, apart from specifying an absolute size you may, according to
- lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with
- the prefix V(+) or the amount to reduce the logical volume by with prefix V(-).
- - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0.
- - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent).
+ - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one
+ of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) C(--extents) as a percentage of [VG|PVS|FREE|ORIGIN]; Float
+ values must begin with a digit.
+ - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify
+ the amount to extend the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix
+ V(-).
+ - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0.
+ - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent).
state:
type: str
description:
- - Control if the logical volume exists. If V(present) and the
- volume does not already exist then the O(size) option is required.
- choices: [ absent, present ]
+ - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option
+ is required.
+ choices: [absent, present]
default: present
active:
description:
- - Whether the volume is active and visible to the host.
+ - Whether the volume is active and visible to the host.
type: bool
default: true
force:
description:
- - Shrink or remove operations of volumes requires this switch. Ensures that
- that filesystems get never corrupted/destroyed by mistake.
+ - Shrink or remove operations of volumes require this switch. Ensures that filesystems never get corrupted/destroyed
+ by mistake.
type: bool
default: false
opts:
type: str
description:
- - Free-form options to be passed to the lvcreate command.
+ - Free-form options to be passed to the lvcreate command.
snapshot:
type: str
description:
- - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the origin volume.
+ - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the
+ origin volume.
pvs:
type: list
elements: str
description:
- - List of physical volumes (for example V(/dev/sda, /dev/sdb)).
+ - List of physical volumes (for example V(/dev/sda, /dev/sdb)).
thinpool:
type: str
description:
- - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+ - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
shrink:
description:
- - Shrink if current size is higher than size requested.
+ - Shrink if current size is higher than size requested.
type: bool
default: true
resizefs:
description:
- - Resize the underlying filesystem together with the logical volume.
- - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems.
- Attempts to resize other filesystem types will fail.
+ - Resize the underlying filesystem together with the logical volume.
+ - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types
+ result in failure.
type: bool
default: false
notes:
- You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a logical volume of 512m
community.general.lvol:
vg: firefly
@@ -233,7 +232,7 @@ EXAMPLES = '''
lv: test
thinpool: testpool
size: 128g
-'''
+"""
import re
import shlex
diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py
index 2d768eaafd..cbd643efdb 100644
--- a/plugins/modules/lxc_container.py
+++ b/plugins/modules/lxc_container.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: lxc_container
short_description: Manage LXC Containers
description:
@@ -19,183 +18,172 @@ author: "Kevin Carter (@cloudnull)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of a container.
- type: str
- required: true
- backing_store:
- choices:
- - dir
- - lvm
- - loop
- - btrfs
- - overlayfs
- - zfs
- description:
- - Backend storage type for the container.
- type: str
- default: dir
- template:
- description:
- - Name of the template to use within an LXC create.
- type: str
- default: ubuntu
- template_options:
- description:
- - Template options when building the container.
- type: str
- config:
- description:
- - Path to the LXC configuration file.
- type: path
- lv_name:
- description:
- - Name of the logical volume, defaults to the container name.
- - If not specified, it defaults to C($CONTAINER_NAME).
- type: str
- vg_name:
- description:
- - If backend store is lvm, specify the name of the volume group.
- type: str
- default: lxc
- thinpool:
- description:
- - Use LVM thin pool called TP.
- type: str
- fs_type:
- description:
- - Create fstype TYPE.
- type: str
- default: ext4
- fs_size:
- description:
- - File system Size.
- type: str
- default: 5G
- directory:
- description:
- - Place rootfs directory under DIR.
- type: path
- zfs_root:
- description:
- - Create zfs under given zfsroot.
- type: str
- container_command:
- description:
- - Run a command within a container.
- type: str
- lxc_path:
- description:
- - Place container under E(PATH).
- type: path
- container_log:
- description:
- - Enable a container log for host actions to the container.
- type: bool
- default: false
- container_log_level:
- choices:
- - Info
- - info
- - INFO
- - Error
- - error
- - ERROR
- - Debug
- - debug
- - DEBUG
- description:
- - Set the log level for a container where O(container_log) was set.
- type: str
- required: false
- default: INFO
- clone_name:
- description:
- - Name of the new cloned server.
- - This is only used when state is clone.
- type: str
- clone_snapshot:
- description:
- - Create a snapshot a container when cloning.
- - This is not supported by all container storage backends.
- - Enabling this may fail if the backing store does not support snapshots.
- type: bool
- default: false
- archive:
- description:
- - Create an archive of a container.
- - This will create a tarball of the running container.
- type: bool
- default: false
- archive_path:
- description:
- - Path the save the archived container.
- - If the path does not exist the archive method will attempt to create it.
- type: path
- archive_compression:
- choices:
- - gzip
- - bzip2
- - none
- description:
- - Type of compression to use when creating an archive of a running
- container.
- type: str
- default: gzip
- state:
- choices:
- - started
- - stopped
- - restarted
- - absent
- - frozen
- - clone
- description:
- - Define the state of a container.
- - If you clone a container using O(clone_name) the newly cloned
- container created in a stopped state.
- - The running container will be stopped while the clone operation is
- happening and upon completion of the clone the original container
- state will be restored.
- type: str
- default: started
- container_config:
- description:
- - A list of C(key=value) options to use when configuring a container.
- type: list
- elements: str
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+ - Name of the logical volume, defaults to the container name.
+ - If not specified, it defaults to E(CONTAINER_NAME).
+ type: str
+ vg_name:
+ description:
+ - If backend store is lvm, specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+ - File system Size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under E(PATH).
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: false
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+ - Set the log level for a container where O(container_log) was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+ - Create a snapshot of a container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: false
+ archive:
+ description:
+ - Create an archive of a container.
+ - This creates a tarball of the running container.
+ type: bool
+ default: false
+ archive_path:
+ description:
+ - Path to save the archived container.
+ - If the path does not exist the archive method attempts to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+ - If you clone a container using O(clone_name) the newly cloned container is created in a stopped state.
+ - The running container is stopped while the clone operation is happening and upon completion of the clone the original
+ container state is restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
requirements:
- 'lxc >= 2.0 # OS package'
- 'python3 >= 3.5 # OS Package'
- 'python3-lxc # OS Package'
notes:
- - Containers must have a unique name. If you attempt to create a container
- with a name that already exists in the users namespace the module will
- simply return as "unchanged".
- - The O(container_command) can be used with any state except V(absent). If
- used with state V(stopped) the container will be V(started), the command
- executed, and then the container V(stopped) again. Likewise if O(state=stopped)
- and the container does not exist it will be first created,
- V(started), the command executed, and then V(stopped). If you use a "|"
- in the variable you can use common script formatting within the variable
- itself. The O(container_command) option will always execute as BASH.
- When using O(container_command), a log file is created in the C(/tmp/) directory
- which contains both C(stdout) and C(stderr) of any command executed.
- - If O(archive=true) the system will attempt to create a compressed
- tarball of the running container. The O(archive) option supports LVM backed
- containers and will create a snapshot of the running container when
- creating the archive.
- - If your distro does not have a package for C(python3-lxc), which is a
- requirement for this module, it can be installed from source at
- U(https://github.com/lxc/python3-lxc) or installed via pip using the
- package name C(lxc).
-'''
+ - Containers must have a unique name. If you attempt to create a container with a name that already exists in the user's
+ namespace the module simply returns as "unchanged".
+ - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container is V(started),
+ the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not
+ exist it is first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you
+ can use common script formatting within the variable itself. The O(container_command) option always executes as C(bash).
+ When using O(container_command), a log file is created in the C(/tmp/) directory which contains both C(stdout) and C(stderr)
+ of any command executed.
+ - If O(archive=true) the system attempts to create a compressed tarball of the running container. The O(archive) option
+ supports LVM backed containers and creates a snapshot of the running container when creating the archive.
+ - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed
+ from source at U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc).
+"""
EXAMPLES = r"""
- name: Create a started container
@@ -268,14 +256,14 @@ EXAMPLES = r"""
ansible.builtin.debug:
var: lvm_container_info
-- name: Run a command in a container and ensure its in a "stopped" state.
+- name: Run a command in a container and ensure it is in a "stopped" state.
community.general.lxc_container:
name: test-container-started
state: stopped
container_command: |
echo 'hello world.' | tee /opt/stopped
-- name: Run a command in a container and ensure its it in a "frozen" state.
+- name: Run a command in a container and ensure it is in a "frozen" state.
community.general.lxc_container:
name: test-container-stopped
state: frozen
@@ -382,45 +370,45 @@ EXAMPLES = r"""
RETURN = r"""
lxc_container:
- description: container information
- returned: success
- type: complex
- contains:
- name:
- description: name of the lxc container
- returned: success
- type: str
- sample: test_host
- init_pid:
- description: pid of the lxc init process
- returned: success
- type: int
- sample: 19786
- interfaces:
- description: list of the container's network interfaces
- returned: success
- type: list
- sample: [ "eth0", "lo" ]
- ips:
- description: list of ips
- returned: success
- type: list
- sample: [ "10.0.3.3" ]
- state:
- description: resulting state of the container
- returned: success
- type: str
- sample: "running"
- archive:
- description: resulting state of the container
- returned: success, when archive is true
- type: str
- sample: "/tmp/test-container-config.tar"
- clone:
- description: if the container was cloned
- returned: success, when clone_name is specified
- type: bool
- sample: true
+ description: Container information.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the LXC container.
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: Pid of the LXC init process.
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: List of the container's network interfaces.
+ returned: success
+ type: list
+ sample: ["eth0", "lo"]
+ ips:
+ description: List of IPs.
+ returned: success
+ type: list
+ sample: ["10.0.3.3"]
+ state:
+ description: Resulting state of the container.
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+ description: Resulting state of the container.
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: If the container was cloned.
+ returned: success, when clone_name is specified
+ type: bool
+ sample: true
"""
import os
@@ -694,7 +682,7 @@ class LxcContainerManagement(object):
"""Configure an LXC container.
Write new configuration values to the lxc config file. This will
- stop the container if it's running write the new options and then
+ stop the container if it is running write the new options and then
restart the container upon completion.
"""
diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py
index 1f811a7efa..87b0e2e125 100644
--- a/plugins/modules/lxca_cmms.py
+++ b/plugins/modules/lxca_cmms.py
@@ -8,16 +8,14 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
module: lxca_cmms
short_description: Custom module for lxca cmms inventory utility
description:
- - This module returns/displays a inventory details of cmms
-
+ - This module returns/displays inventory details of CMMs.
attributes:
check_mode:
support: none
@@ -26,32 +24,28 @@ attributes:
options:
uuid:
- description:
- uuid of device, this is string with length greater than 16.
+ description: UUID of device, this is a string with a length greater than 16.
type: str
command_options:
- description:
- options to filter nodes information
+ description: Options to filter nodes information.
default: cmms
choices:
- - cmms
- - cmms_by_uuid
- - cmms_by_chassis_uuid
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
type: str
chassis:
- description:
- uuid of chassis, this is string with length greater than 16.
+ description: UUID of chassis, this is a string with a length greater than 16.
type: str
extends_documentation_fragment:
- community.general.lxca_common
- community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# get all cmms info
- name: Get nodes data from LXCA
community.general.lxca_cmms:
@@ -76,28 +70,27 @@ EXAMPLES = '''
auth_url: "https://10.243.15.168"
chassis: "3C737AA5E31640CE949B10C129A8B01F"
command_options: cmms_by_chassis_uuid
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
result:
- description: cmms detail from lxca
- returned: success
- type: dict
- sample:
- cmmList:
- - machineType: ''
- model: ''
- type: 'CMM'
- uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ description: Cmms detail from lxca.
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
- - machineType: ''
- model: ''
- type: 'CMM'
- uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
# Multiple cmms details
-'''
+"""
import traceback
from ansible.module_utils.basic import AnsibleModule
@@ -151,8 +144,8 @@ FUNC_DICT = {
INPUT_ARG_SPEC = dict(
command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
'cmms_by_chassis_uuid']),
- uuid=dict(default=None),
- chassis=dict(default=None)
+ uuid=dict(),
+ chassis=dict()
)
diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py
index 3b37322edb..91d3337b27 100644
--- a/plugins/modules/lxca_nodes.py
+++ b/plugins/modules/lxca_nodes.py
@@ -8,16 +8,14 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
module: lxca_nodes
short_description: Custom module for lxca nodes inventory utility
description:
- - This module returns/displays a inventory details of nodes
-
+ - This module returns/displays inventory details of nodes.
attributes:
check_mode:
support: none
@@ -26,34 +24,30 @@ attributes:
options:
uuid:
- description:
- uuid of device, this is string with length greater than 16.
+ description: UUID of device, this is a string with a length greater than 16.
type: str
command_options:
- description:
- options to filter nodes information
+ description: Options to filter nodes information.
default: nodes
choices:
- - nodes
- - nodes_by_uuid
- - nodes_by_chassis_uuid
- - nodes_status_managed
- - nodes_status_unmanaged
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
type: str
chassis:
- description:
- uuid of chassis, this is string with length greater than 16.
+ description: UUID of chassis, this is a string with a length greater than 16.
type: str
extends_documentation_fragment:
- community.general.lxca_common
- community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# get all nodes info
- name: Get nodes data from LXCA
community.general.lxca_nodes:
@@ -95,28 +89,27 @@ EXAMPLES = '''
login_password: Password
auth_url: "https://10.243.15.168"
command_options: nodes_status_unmanaged
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
result:
- description: nodes detail from lxca
- returned: always
- type: dict
- sample:
- nodeList:
- - machineType: '6241'
- model: 'AC1'
- type: 'Rack-TowerServer'
- uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ description: Nodes detail from lxca.
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
- - machineType: '8871'
- model: 'AC1'
- type: 'Rack-TowerServer'
- uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
# Multiple nodes details
-'''
+"""
import traceback
from ansible.module_utils.basic import AnsibleModule
@@ -182,7 +175,7 @@ INPUT_ARG_SPEC = dict(
'nodes_by_chassis_uuid',
'nodes_status_managed',
'nodes_status_unmanaged']),
- uuid=dict(default=None), chassis=dict(default=None)
+ uuid=dict(), chassis=dict()
)
diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py
index 5c5d8a4d8d..4abec5acaa 100644
--- a/plugins/modules/lxd_container.py
+++ b/plugins/modules/lxd_container.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: lxd_container
short_description: Manage LXD instances
description:
@@ -19,198 +18,182 @@ author: "Hiroaki Nakamura (@hnakamur)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- version_added: 6.4.0
- diff_mode:
- support: full
- version_added: 6.4.0
+ check_mode:
+ support: full
+ version_added: 6.4.0
+ diff_mode:
+ support: full
+ version_added: 6.4.0
options:
- name:
- description:
- - Name of an instance.
- type: str
- required: true
- project:
- description:
- - 'Project of an instance.
- See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
- required: false
- type: str
- version_added: 4.8.0
- architecture:
- description:
- - 'The architecture for the instance (for example V(x86_64) or V(i686)).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
- type: str
- required: false
- config:
- description:
- - 'The config for the instance (for example V({"limits.cpu": "2"})).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
- - If the instance already exists and its "config" values in metadata
- obtained from the LXD API U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get)
- are different, then this module tries to apply the configurations
- U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put).
- - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true).
- type: dict
- required: false
- ignore_volatile_options:
- description:
- - If set to V(true), options starting with C(volatile.) are ignored. As a result,
- they are reapplied for each execution.
- - This default behavior can be changed by setting this option to V(false).
- - The default value changed from V(true) to V(false) in community.general 6.0.0.
- type: bool
- required: false
- default: false
- version_added: 3.7.0
- profiles:
- description:
- - Profile to be used by the instance.
- type: list
- elements: str
- devices:
- description:
- - 'The devices for the instance
- (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
- type: dict
- required: false
- ephemeral:
- description:
- - Whether or not the instance is ephemeral (for example V(true) or V(false)).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
- required: false
- type: bool
- source:
- description:
- - 'The source for the instance
- (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/",
- "protocol": "simplestreams", "alias": "22.04" })).'
- - 'See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.'
- - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).'
- required: false
- type: dict
- state:
- choices:
- - started
- - stopped
- - restarted
- - absent
- - frozen
- description:
- - Define the state of an instance.
- required: false
- default: started
- type: str
- target:
- description:
- - For cluster deployments. Will attempt to create an instance on a target node.
- If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
- The name should respond to same name of the node you see in C(lxc cluster list).
- type: str
- required: false
- version_added: 1.0.0
- timeout:
- description:
- - A timeout for changing the state of the instance.
- - This is also used as a timeout for waiting until IPv4 addresses
- are set to the all network interfaces in the instance after
- starting or restarting.
- required: false
- default: 30
- type: int
- type:
- description:
- - Instance type can be either V(virtual-machine) or V(container).
- required: false
- default: container
- choices:
- - container
- - virtual-machine
- type: str
- version_added: 4.1.0
- wait_for_ipv4_addresses:
- description:
- - If this is V(true), the C(lxd_container) waits until IPv4 addresses
- are set to the all network interfaces in the instance after
- starting or restarting.
- required: false
- default: false
- type: bool
- wait_for_container:
- description:
- - If set to V(true), the tasks will wait till the task reports a
- success status when performing container operations.
- default: false
- type: bool
- version_added: 4.4.0
- force_stop:
- description:
- - If this is V(true), the C(lxd_container) forces to stop the instance
- when it stops or restarts the instance.
- required: false
- default: false
- type: bool
- url:
- description:
- - The unix domain socket path or the https URL for the LXD server.
- required: false
- default: unix:/var/lib/lxd/unix.socket
- type: str
- snap_url:
- description:
- - The unix domain socket path when LXD is installed by snap package manager.
- required: false
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
- required: false
- aliases: [ key_file ]
- type: path
- client_cert:
- description:
- - The client certificate file path.
- - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
- required: false
- aliases: [ cert_file ]
- type: path
- trust_password:
- description:
- - The client trusted password.
- - 'You need to set this password on the LXD server before
- running this module using the following command:
- C(lxc config set core.trust_password ).
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
- - If trust_password is set, this module send a request for
- authentication before sending any requests.
- required: false
- type: str
+ name:
+ description:
+ - Name of an instance.
+ type: str
+ required: true
+ project:
+ description:
+ - Project of an instance.
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).
+ required: false
+ type: str
+ version_added: 4.8.0
+ architecture:
+ description:
+ - The architecture for the instance (for example V(x86_64) or V(i686)).
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the instance (for example V({"limits.cpu": "2"})).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+ - If the instance already exists and its "config" values in metadata obtained from the LXD API
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get)
+ are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put).
+ - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true).
+ type: dict
+ required: false
+ ignore_volatile_options:
+ description:
+ - If set to V(true), options starting with C(volatile.) are ignored. As a result, they are reapplied for each execution.
+ - This default behavior can be changed by setting this option to V(false).
+ - The default value changed from V(true) to V(false) in community.general 6.0.0.
+ type: bool
+ required: false
+ default: false
+ version_added: 3.7.0
+ profiles:
+ description:
+ - Profile to be used by the instance.
+ type: list
+ elements: str
+ devices:
+ description:
+ - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the instance is ephemeral (for example V(true) or V(false)).
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the instance (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/",
+ "protocol": "simplestreams", "alias": "22.04" })).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.
+ - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of an instance.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+ - For cluster deployments. It attempts to create an instance on a target node. If the instance exists elsewhere in a
+ cluster, then it is not replaced nor moved. The name should respond to same name of the node you see in C(lxc cluster
+ list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the instance.
+ - This is also used as a timeout for waiting until IPv4 addresses are set to the all network interfaces in the instance
+ after starting or restarting.
+ required: false
+ default: 30
+ type: int
+ type:
+ description:
+ - Instance type can be either V(virtual-machine) or V(container).
+ required: false
+ default: container
+ choices:
+ - container
+ - virtual-machine
+ type: str
+ version_added: 4.1.0
+ wait_for_ipv4_addresses:
+ description:
+ - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set to the all network interfaces in the instance
+ after starting or restarting.
+ required: false
+ default: false
+ type: bool
+ wait_for_container:
+ description:
+ - If set to V(true), the tasks wait until the task reports a success status when performing container operations.
+ default: false
+ type: bool
+ version_added: 4.4.0
+ force_stop:
+ description:
+ - If this is V(true), the C(lxd_container) forces to stop the instance when it stops or restarts the instance.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [key_file]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [cert_file]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config
+ set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+ - If trust_password is set, this module sends a request for authentication before sending any requests.
+ required: false
+ type: str
notes:
- Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance
- with a name that already existed in the users namespace the module will
- simply return as "unchanged".
- - There are two ways to run commands inside a container or virtual machine, using the command
- module or using the ansible lxd connection plugin bundled in Ansible >=
- 2.1, the later requires python to be installed in the instance which can
- be done with the command module.
- - You can copy a file from the host to the instance
- with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module
- and the P(community.general.lxd#connection) connection plugin.
- See the example below.
- - You can copy a file in the created instance to the localhost
- with C(command=lxc file pull instance_name/dir/filename filename).
+ with a name that already exists in the user's namespace, the module simply returns as "unchanged".
+ - There are two ways to run commands inside a container or virtual machine, using the command module or using the ansible
+ lxd connection plugin bundled in Ansible >= 2.1, the latter requires Python to be installed in the instance which can be
+ done with the command module.
+ - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template)
+ module and the P(community.general.lxd#connection) connection plugin. See the example below.
+ - You can copy a file in the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename).
See the first example below.
- - linuxcontainers.org has phased out LXC/LXD support with March 2024
+ - Linuxcontainers.org has phased out LXC/LXD support in March 2024
(U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)).
Currently only Ubuntu is still providing images.
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# An example for creating a Ubuntu container and install python
- hosts: localhost
connection: local
@@ -335,8 +318,8 @@ EXAMPLES = '''
community.general.lxd_container:
url: https://127.0.0.1:8443
# These client_cert and client_key values are equal to the default values.
- #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
- #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
trust_password: mypassword
name: mycontainer
state: restarted
@@ -359,7 +342,7 @@ EXAMPLES = '''
# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
# members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'.
# LXD API calls can be made to any LXD member, in this example, we send API requests to
-#'node01.example.com', which matches ansible inventory name.
+# 'node01.example.com', which matches ansible inventory name.
- hosts: node01.example.com
tasks:
- name: Create LXD container
@@ -400,17 +383,22 @@ EXAMPLES = '''
protocol: simplestreams
type: image
mode: pull
- server: [...] # URL to the image server
+ server: '...' # URL to the image server
alias: debian/11
timeout: 600
-'''
+"""
-RETURN = '''
+RETURN = r"""
addresses:
description: Mapping from the network device name to a list of IPv4 addresses in the instance.
returned: when state is started or restarted
type: dict
- sample: {"eth0": ["10.155.92.191"]}
+ sample:
+ {
+ "eth0": [
+ "10.155.92.191"
+ ]
+ }
old_state:
description: The old state of the instance.
returned: when state is started or restarted
@@ -426,7 +414,8 @@ actions:
returned: success
type: list
sample: ["create", "start"]
-'''
+"""
+
import copy
import datetime
import os
diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py
index 13660fd91d..2525889968 100644
--- a/plugins/modules/lxd_profile.py
+++ b/plugins/modules/lxd_profile.py
@@ -9,126 +9,114 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: lxd_profile
short_description: Manage LXD profiles
description:
- - Management of LXD profiles
+ - Management of LXD profiles.
author: "Hiroaki Nakamura (@hnakamur)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of a profile.
- required: true
- type: str
- project:
- description:
- - 'Project of a profile.
- See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
- type: str
- required: false
- version_added: 4.8.0
+ name:
description:
- description:
- - Description of the profile.
- type: str
- config:
- description:
- - 'The config for the instance (e.g. {"limits.memory": "4GB"}).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
- - If the profile already exists and its "config" value in metadata
- obtained from
- GET /1.0/profiles/
- U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get)
- are different, then this module tries to apply the configurations
- U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put).
- - Not all config values are supported to apply the existing profile.
- Maybe you need to delete and recreate a profile.
- required: false
- type: dict
- devices:
- description:
- - 'The devices for the profile
- (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
- required: false
- type: dict
- new_name:
- description:
- - A new name of a profile.
- - If this parameter is specified a profile will be renamed to this name.
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post).
- required: false
- type: str
- merge_profile:
- description:
- - Merge the configuration of the present profile with the new desired configuration,
- instead of replacing it.
- required: false
- default: false
- type: bool
- version_added: 2.1.0
- state:
- choices:
- - present
- - absent
- description:
- - Define the state of a profile.
- required: false
- default: present
- type: str
- url:
- description:
- - The unix domain socket path or the https URL for the LXD server.
- required: false
- default: unix:/var/lib/lxd/unix.socket
- type: str
- snap_url:
- description:
- - The unix domain socket path when LXD is installed by snap package manager.
- required: false
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.key).
- required: false
- aliases: [ key_file ]
- type: path
- client_cert:
- description:
- - The client certificate file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
- required: false
- aliases: [ cert_file ]
- type: path
- trust_password:
- description:
- - The client trusted password.
- - You need to set this password on the LXD server before
- running this module using the following command.
- lxc config set core.trust_password
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
- - If trust_password is set, this module send a request for
- authentication before sending any requests.
- required: false
- type: str
+ - Name of a profile.
+ required: true
+ type: str
+ project:
+ description:
+ - Project of a profile. See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).
+ type: str
+ required: false
+ version_added: 4.8.0
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+ - 'The config for the instance (for example V({"limits.memory": "4GB"})).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).
+ - If the profile already exists and its C(config) value in metadata obtained from C(GET /1.0/profiles/)
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get)
+ is different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put).
+ - Not all config values are supported to apply the existing profile. Maybe you need to delete and recreate a profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).
+ required: false
+ type: dict
+ new_name:
+ description:
+ - A new name of a profile.
+ - If this parameter is specified, a profile is renamed to this name.
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post).
+ required: false
+ type: str
+ merge_profile:
+ description:
+ - Merge the configuration of the present profile with the new desired configuration, instead of replacing it.
+ required: false
+ default: false
+ type: bool
+ version_added: 2.1.0
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [key_file]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [cert_file]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config
+ set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+ - If O(trust_password) is set, this module sends a request for authentication before sending any requests.
+ required: false
+ type: str
notes:
- - Profiles must have a unique name. If you attempt to create a profile
- with a name that already existed in the users namespace the module will
- simply return as "unchanged".
-'''
+ - Profiles must have a unique name. If you attempt to create a profile with a name that already exists in the user's namespace,
+ the module simply returns as "unchanged".
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# An example for creating a profile
- hosts: localhost
connection: local
@@ -162,22 +150,22 @@ EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- - name: Create macvlan profile
- community.general.lxd_profile:
- url: https://127.0.0.1:8443
- # These client_cert and client_key values are equal to the default values.
- #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
- #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
- trust_password: mypassword
- name: macvlan
- state: present
- config: {}
- description: my macvlan profile
- devices:
- eth0:
- nictype: macvlan
- parent: br0
- type: nic
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
# An example for modify/merge a profile
- hosts: localhost
@@ -214,11 +202,11 @@ EXAMPLES = '''
name: macvlan
new_name: macvlan2
state: present
-'''
+"""
-RETURN = '''
+RETURN = r"""
old_state:
- description: The old state of the profile
+ description: The old state of the profile.
returned: success
type: str
sample: "absent"
@@ -232,7 +220,7 @@ actions:
returned: success
type: list
sample: ["create"]
-'''
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py
index 0d321808a2..20804f8b38 100644
--- a/plugins/modules/lxd_project.py
+++ b/plugins/modules/lxd_project.py
@@ -7,8 +7,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: lxd_project
short_description: Manage LXD projects
version_added: 4.8.0
@@ -18,98 +17,91 @@ author: "Raymond Chang (@we10710aa)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of the project.
- required: true
- type: str
+ name:
description:
- description:
- - Description of the project.
- type: str
- config:
- description:
- - 'The config for the project (for example V({"features.profiles": "true"})).
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).'
- - If the project already exists and its "config" value in metadata
- obtained from
- C(GET /1.0/projects/)
- U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get)
- are different, then this module tries to apply the configurations
- U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put).
- type: dict
- new_name:
- description:
- - A new name of a project.
- - If this parameter is specified a project will be renamed to this name.
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post).
- required: false
- type: str
- merge_project:
- description:
- - Merge the configuration of the present project with the new desired configuration,
- instead of replacing it. If configuration is the same after merged, no change will be made.
- required: false
- default: false
- type: bool
- state:
- choices:
- - present
- - absent
- description:
- - Define the state of a project.
- required: false
- default: present
- type: str
- url:
- description:
- - The Unix domain socket path or the https URL for the LXD server.
- required: false
- default: unix:/var/lib/lxd/unix.socket
- type: str
- snap_url:
- description:
- - The Unix domain socket path when LXD is installed by snap package manager.
- required: false
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.key).
- required: false
- aliases: [ key_file ]
- type: path
- client_cert:
- description:
- - The client certificate file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
- required: false
- aliases: [ cert_file ]
- type: path
- trust_password:
- description:
- - The client trusted password.
- - 'You need to set this password on the LXD server before
- running this module using the following command:
- C(lxc config set core.trust_password )
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
- - If O(trust_password) is set, this module send a request for
- authentication before sending any requests.
- required: false
- type: str
+ - Name of the project.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the project.
+ type: str
+ config:
+ description:
+ - 'The config for the project (for example V({"features.profiles": "true"})).'
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).
+ - If the project already exists and its C(config) value in metadata obtained from C(GET /1.0/projects/)
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get)
+ is different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put).
+ type: dict
+ new_name:
+ description:
+ - A new name of a project.
+ - If this parameter is specified, a project is renamed to this name.
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post).
+ required: false
+ type: str
+ merge_project:
+ description:
+ - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If the configuration
+ is the same after merging, no change is made.
+ required: false
+ default: false
+ type: bool
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a project.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The Unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The Unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [key_file]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [cert_file]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config
+ set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+ - If O(trust_password) is set, this module sends a request for authentication before sending any requests.
+ required: false
+ type: str
notes:
- - Projects must have a unique name. If you attempt to create a project
- with a name that already existed in the users namespace the module will
- simply return as "unchanged".
-'''
+ - Projects must have a unique name. If you attempt to create a project with a name that already exists in the user's namespace,
+ the module simply returns as "unchanged".
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# An example for creating a project
- hosts: localhost
connection: local
@@ -132,9 +124,9 @@ EXAMPLES = '''
state: present
config: {}
description: my new project
-'''
+"""
-RETURN = '''
+RETURN = r"""
old_state:
description: The old state of the project.
returned: success
@@ -184,7 +176,7 @@ actions:
type: list
elements: str
sample: ["create"]
-'''
+"""
from ansible_collections.community.general.plugins.module_utils.lxd import (
LXDClient, LXDClientException, default_key_file, default_cert_file
diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py
index cd620687d7..c328e45904 100644
--- a/plugins/modules/macports.py
+++ b/plugins/modules/macports.py
@@ -12,54 +12,54 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: macports
author: "Jimmy Tang (@jcftang)"
short_description: Package manager for MacPorts
description:
- - Manages MacPorts packages (ports)
+ - Manages MacPorts packages (ports).
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - A list of port names.
- aliases: ['port']
- type: list
- elements: str
- selfupdate:
- description:
- - Update Macports and the ports tree, either prior to installing ports or as a separate step.
- - Equivalent to running C(port selfupdate).
- aliases: ['update_cache', 'update_ports']
- default: false
- type: bool
- state:
- description:
- - Indicates the desired state of the port.
- choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
- default: present
- type: str
- upgrade:
- description:
- - Upgrade all outdated ports, either prior to installing ports or as a separate step.
- - Equivalent to running C(port upgrade outdated).
- default: false
- type: bool
- variant:
- description:
- - A port variant specification.
- - 'O(variant) is only supported with O(state=installed) and O(state=present).'
- aliases: ['variants']
- type: str
-'''
-EXAMPLES = '''
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+ - Update Macports and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: false
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: ['present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: false
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - O(variant) is only supported with O(state=installed) and O(state=present).
+ aliases: ['variants']
+ type: str
+"""
+
+EXAMPLES = r"""
- name: Install the foo port
community.general.macports:
name: foo
@@ -74,8 +74,8 @@ EXAMPLES = '''
name: "{{ ports }}"
vars:
ports:
- - foo
- - foo-tools
+ - foo
+ - foo-tools
- name: Update Macports and the ports tree, then upgrade all outdated ports
community.general.macports:
@@ -101,7 +101,7 @@ EXAMPLES = '''
community.general.macports:
name: foo
state: inactive
-'''
+"""
import re
@@ -221,7 +221,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr):
def activate_ports(module, port_path, ports, stdout, stderr):
- """ Activate a port if it's inactive. """
+ """Activate a port if it is inactive."""
activate_c = 0
@@ -248,7 +248,7 @@ def activate_ports(module, port_path, ports, stdout, stderr):
def deactivate_ports(module, port_path, ports, stdout, stderr):
- """ Deactivate a port if it's active. """
+ """Deactivate a port if it is active."""
deactivated_c = 0
@@ -280,7 +280,7 @@ def main():
selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
upgrade=dict(default=False, type='bool'),
- variant=dict(aliases=["variants"], default=None, type='str')
+ variant=dict(aliases=["variants"], type='str')
)
)
diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py
index 1916c140c3..7c8bdb69b3 100644
--- a/plugins/modules/mail.py
+++ b/plugins/modules/mail.py
@@ -9,27 +9,21 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
-- Dag Wieers (@dagwieers)
+ - Dag Wieers (@dagwieers)
module: mail
short_description: Send an email
description:
-- This module is useful for sending emails from playbooks.
-- One may wonder why automate sending emails? In complex environments
- there are from time to time processes that cannot be automated, either
- because you lack the authority to make it so, or because not everyone
- agrees to a common approach.
-- If you cannot automate a specific step, but the step is non-blocking,
- sending out an email to the responsible party to make them perform their
- part of the bargain is an elegant way to put the responsibility in
- someone else's lap.
-- Of course sending out a mail can be equally useful as a way to notify
- one or more people in a team that a specific action has been
- (successfully) taken.
+ - This module is useful for sending emails from playbooks.
+ - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot
+ be automated, either because you lack the authority to make it so, or because not everyone agrees to a common approach.
+ - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to
+ make them perform their part of the bargain is an elegant way to put the responsibility in someone else's lap.
+ - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action
+ has been (successfully) taken.
extends_documentation_fragment:
-- community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -38,118 +32,118 @@ attributes:
options:
sender:
description:
- - The email-address the mail is sent from. May contain address and phrase.
+ - The email-address the mail is sent from. May contain address and phrase.
type: str
default: root
- aliases: [ from ]
+ aliases: [from]
to:
description:
- - The email-address(es) the mail is being sent to.
- - This is a list, which may contain address and phrase portions.
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
type: list
elements: str
default: root
- aliases: [ recipients ]
+ aliases: [recipients]
cc:
description:
- - The email-address(es) the mail is being copied to.
- - This is a list, which may contain address and phrase portions.
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
type: list
elements: str
default: []
bcc:
description:
- - The email-address(es) the mail is being 'blind' copied to.
- - This is a list, which may contain address and phrase portions.
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
type: list
elements: str
default: []
subject:
description:
- - The subject of the email being sent.
+ - The subject of the email being sent.
required: true
type: str
- aliases: [ msg ]
+ aliases: [msg]
body:
description:
- - The body of the email being sent.
+ - The body of the email being sent.
type: str
username:
description:
- - If SMTP requires username.
+ - If SMTP requires username.
type: str
password:
description:
- - If SMTP requires password.
+ - If SMTP requires password.
type: str
host:
description:
- - The mail server.
+ - The mail server.
type: str
default: localhost
port:
description:
- - The mail server port.
- - This must be a valid integer between 1 and 65534
+ - The mail server port.
+ - This must be a valid integer between V(1) and V(65534).
type: int
default: 25
attach:
description:
- - A list of pathnames of files to attach to the message.
- - Attached files will have their content-type set to C(application/octet-stream).
+ - A list of pathnames of files to attach to the message.
+ - Attached files have their content-type set to C(application/octet-stream).
type: list
elements: path
default: []
headers:
description:
- - A list of headers which should be added to the message.
- - Each individual header is specified as C(header=value) (see example below).
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as V(header=value) (see example below).
type: list
elements: str
default: []
charset:
description:
- - The character set of email being sent.
+ - The character set of email being sent.
type: str
default: utf-8
subtype:
description:
- - The minor mime type, can be either V(plain) or V(html).
- - The major type is always V(text).
+ - The minor mime type, can be either V(plain) or V(html).
+ - The major type is always V(text).
type: str
- choices: [ html, plain ]
+ choices: [html, plain]
default: plain
secure:
description:
- - If V(always), the connection will only send email if the connection is Encrypted.
- If the server doesn't accept the encrypted connection it will fail.
- - If V(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send.
- - If V(never), the connection will not attempt to setup a secure SSL/TLS session, before sending
- - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending.
- If it is unable to do so it will fail.
+ - If V(always), the connection only sends email if the connection is Encrypted. If the server does not accept the encrypted
+ connection it fails.
+ - If V(try), the connection attempts to setup a secure SSL/TLS session, before trying to send.
+ - If V(never), the connection does not attempt to setup a secure SSL/TLS session, before sending.
+ - If V(starttls), the connection tries to upgrade to a secure SSL/TLS connection, before sending. If it is unable to
+ do so it fails.
type: str
- choices: [ always, never, starttls, try ]
+ choices: [always, never, starttls, try]
default: try
timeout:
description:
- - Sets the timeout in seconds for connection attempts.
+ - Sets the timeout in seconds for connection attempts.
type: int
default: 20
ehlohost:
description:
- - Allows for manual specification of host for EHLO.
+ - Allows for manual specification of host for EHLO.
type: str
version_added: 3.8.0
message_id_domain:
description:
- The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID).
- - Note that this is only available on Python 3+. On Python 2, this value will be ignored.
+ - Note that this is only available on Python 3+. On Python 2, this value is ignored.
type: str
default: ansible
version_added: 8.2.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Example playbook sending mail to root
community.general.mail:
subject: System {{ ansible_hostname }} has been successfully provisioned.
@@ -174,15 +168,15 @@ EXAMPLES = r'''
body: Hello, this is an e-mail. I hope you like it ;-)
from: jane@example.net (Jane Jolie)
to:
- - John Doe
- - Suzie Something
+ - John Doe
+ - Suzie Something
cc: Charlie Root
attach:
- - /etc/group
- - /tmp/avatar2.png
+ - /etc/group
+ - /tmp/avatar2.png
headers:
- - Reply-To=john@example.com
- - X-Special="Something or other"
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
charset: us-ascii
delegate_to: localhost
@@ -222,7 +216,7 @@ EXAMPLES = r'''
subject: Ansible-report
body: System {{ ansible_hostname }} has been successfully provisioned.
secure: starttls
-'''
+"""
import os
import smtplib
@@ -248,7 +242,7 @@ def main():
password=dict(type='str', no_log=True),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=25),
- ehlohost=dict(type='str', default=None),
+ ehlohost=dict(type='str'),
sender=dict(type='str', default='root', aliases=['from']),
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
cc=dict(type='list', elements='str', default=[]),
diff --git a/plugins/modules/make.py b/plugins/modules/make.py
index 39392afca6..57ee525db5 100644
--- a/plugins/modules/make.py
+++ b/plugins/modules/make.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: make
short_description: Run targets in a Makefile
requirements:
@@ -49,7 +48,7 @@ options:
params:
description:
- Any extra parameters to pass to make.
- - If the value is empty, only the key will be used. For example, V(FOO:) will produce V(FOO), not V(FOO=).
+ - If the value is empty, only the key is used. For example, V(FOO:) produces V(FOO), not V(FOO=).
type: dict
target:
description:
@@ -65,9 +64,9 @@ options:
type: list
elements: str
version_added: 7.2.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Build the default target
community.general.make:
chdir: /home/ubuntu/cool-project
@@ -103,9 +102,9 @@ EXAMPLES = r'''
# The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line:
TARGET: arm64
TARGET_ARCH: aarch64
-'''
+"""
-RETURN = r'''
+RETURN = r"""
chdir:
description:
- The value of the module parameter O(chdir).
@@ -143,7 +142,7 @@ targets:
type: str
returned: success
version_added: 7.2.0
-'''
+"""
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
diff --git a/plugins/modules/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py
index eb6424bcdd..fff9552a6c 100644
--- a/plugins/modules/manageiq_alert_profiles.py
+++ b/plugins/modules/manageiq_alert_profiles.py
@@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_alert_profiles
short_description: Configuration of alert profiles for ManageIQ
@@ -20,7 +19,6 @@ extends_documentation_fragment:
author: Elad Alfassa (@elad661)
description:
- The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -31,35 +29,33 @@ options:
state:
type: str
description:
- - absent - alert profile should not exist,
- - present - alert profile should exist,
+ - V(absent) - alert profile should not exist,
+ - V(present) - alert profile should exist.
choices: ['absent', 'present']
default: 'present'
name:
type: str
description:
- The unique alert profile name in ManageIQ.
- - Required when state is "absent" or "present".
+ required: true
resource_type:
type: str
description:
- - The resource type for the alert profile in ManageIQ. Required when state is "present".
- choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
- 'ExtManagementSystem', 'MiddlewareServer']
+ - The resource type for the alert profile in ManageIQ. Required when O(state=present).
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']
alerts:
type: list
elements: str
description:
- List of alert descriptions to assign to this profile.
- - Required if state is "present"
+ - Required if O(state=present).
notes:
type: str
description:
- - Optional notes for this profile
+ - Optional notes for this profile.
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add an alert profile to ManageIQ
community.general.manageiq_alert_profiles:
state: present
@@ -72,7 +68,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete an alert profile from ManageIQ
community.general.manageiq_alert_profiles:
@@ -82,11 +78,11 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
@@ -118,8 +114,7 @@ class ManageIQAlertProfiles(object):
"""
alerts = []
for alert_description in alert_descriptions:
- alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
- description=alert_description)
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", description=alert_description)
alerts.append(alert['href'])
return alerts
@@ -257,7 +252,7 @@ class ManageIQAlertProfiles(object):
def main():
argument_spec = dict(
- name=dict(type='str'),
+ name=dict(type='str', required=True),
resource_type=dict(type='str', choices=['Vm',
'ContainerNode',
'MiqServer',
@@ -274,8 +269,7 @@ def main():
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(argument_spec=argument_spec,
- required_if=[('state', 'present', ['name', 'resource_type']),
- ('state', 'absent', ['name'])])
+ required_if=[('state', 'present', ['resource_type', 'alerts'])])
state = module.params['state']
name = module.params['name']
@@ -283,8 +277,7 @@ def main():
manageiq = ManageIQ(module)
manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
- existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
- name=name)
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", name=name)
# we need to add or update the alert profile
if state == "present":
diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py
index 53f40fb00c..d1b3fdba69 100644
--- a/plugins/modules/manageiq_alerts.py
+++ b/plugins/modules/manageiq_alerts.py
@@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_alerts
short_description: Configuration of alerts in ManageIQ
@@ -20,7 +19,6 @@ extends_documentation_fragment:
author: Elad Alfassa (@elad661)
description:
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -31,8 +29,8 @@ options:
state:
type: str
description:
- - absent - alert should not exist,
- - present - alert should exist,
+ - V(absent) - alert should not exist,
+ - V(present) - alert should exist.
required: false
choices: ['absent', 'present']
default: 'present'
@@ -44,9 +42,8 @@ options:
resource_type:
type: str
description:
- - The entity type for the alert in ManageIQ. Required when state is "present".
- choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
- 'ExtManagementSystem', 'MiddlewareServer']
+ - The entity type for the alert in ManageIQ. Required when O(state=present).
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']
expression_type:
type: str
description:
@@ -58,20 +55,18 @@ options:
description:
- The alert expression for ManageIQ.
- Can either be in the "Miq Expression" format or the "Hash Expression format".
- - Required if state is "present".
+ - Required if O(state=present).
enabled:
description:
- - Enable or disable the alert. Required if state is "present".
+ - Enable or disable the alert. Required if O(state=present).
type: bool
options:
type: dict
description:
- - Additional alert options, such as notification type and frequency
+ - Additional alert options, such as notification type and frequency.
+"""
-
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add an alert with a "hash expression" to ManageIQ
community.general.manageiq_alerts:
state: present
@@ -83,15 +78,15 @@ EXAMPLES = '''
from: "example@example.com"
resource_type: ContainerNode
expression:
- eval_method: hostd_log_threshold
- mode: internal
- options: {}
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Add an alert with a "miq expression" to ManageIQ
community.general.manageiq_alerts:
@@ -105,20 +100,20 @@ EXAMPLES = '''
resource_type: Vm
expression_type: miq
expression:
- and:
- - CONTAINS:
- tag: Vm.managed-environment
- value: prod
- - not:
- CONTAINS:
- tag: Vm.host.managed-environment
- value: prod
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete an alert from ManageIQ
community.general.manageiq_alerts:
@@ -128,11 +123,11 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
@@ -156,7 +151,7 @@ class ManageIQAlert(object):
self.miq_expression = alert['miq_expression']
if 'exp' in self.miq_expression:
# miq_expression is a field that needs a special case, because
- # it's returned surrounded by a dict named exp even though we don't
+ # it is returned surrounded by a dict named exp even though we don't
# send it with that dict.
self.miq_expression = self.miq_expression['exp']
@@ -305,7 +300,7 @@ def main():
expression=dict(type='dict'),
options=dict(type='dict'),
enabled=dict(type='bool'),
- state=dict(required=False, default='present',
+ state=dict(default='present',
choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py
index e060b9a01a..68170ea733 100644
--- a/plugins/modules/manageiq_group.py
+++ b/plugins/modules/manageiq_group.py
@@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_group
short_description: Management of groups in ManageIQ
@@ -33,70 +32,69 @@ options:
state:
type: str
description:
- - absent - group should not exist, present - group should be.
+ - V(absent) - group should not exist,
+ - V(present) - group should exist.
choices: ['absent', 'present']
default: 'present'
description:
type: str
description:
- - The group description.
+ - The group description.
required: true
- default: null
role_id:
type: int
description:
- - The the group role id
+ - The group role ID.
required: false
- default: null
role:
type: str
description:
- - The the group role name
- - The O(role_id) has precedence over the O(role) when supplied.
+ - The group role name.
+ - The O(role_id) has precedence over the O(role) when supplied.
required: false
- default: null
+ default:
tenant_id:
type: int
description:
- - The tenant for the group identified by the tenant id.
+ - The tenant for the group identified by the tenant ID.
required: false
- default: null
+ default:
tenant:
type: str
description:
- - The tenant for the group identified by the tenant name.
- - The O(tenant_id) has precedence over the O(tenant) when supplied.
- - Tenant names are case sensitive.
+ - The tenant for the group identified by the tenant name.
+ - The O(tenant_id) has precedence over the O(tenant) when supplied.
+ - Tenant names are case sensitive.
required: false
- default: null
+ default:
managed_filters:
- description: The tag values per category
+ description: The tag values per category.
type: dict
required: false
- default: null
+ default:
managed_filters_merge_mode:
type: str
description:
- - In merge mode existing categories are kept or updated, new categories are added.
- - In replace mode all categories will be replaced with the supplied O(managed_filters).
- choices: [ merge, replace ]
+ - In V(merge) mode existing categories are kept or updated, new categories are added.
+ - In V(replace) mode all categories are replaced with the supplied O(managed_filters).
+ choices: [merge, replace]
default: replace
belongsto_filters:
- description: A list of strings with a reference to the allowed host, cluster or folder
+ description: A list of strings with a reference to the allowed host, cluster or folder.
type: list
elements: str
required: false
- default: null
+ default:
belongsto_filters_merge_mode:
type: str
description:
- - In merge mode existing settings are merged with the supplied O(belongsto_filters).
- - In replace mode current values are replaced with the supplied O(belongsto_filters).
- choices: [ merge, replace ]
+ - In V(merge) mode existing settings are merged with the supplied O(belongsto_filters).
+ - In V(replace) mode current values are replaced with the supplied O(belongsto_filters).
+ choices: [merge, replace]
default: replace
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
community.general.manageiq_group:
description: 'MyGroup-user'
@@ -106,7 +104,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
community.general.manageiq_group:
@@ -117,33 +115,33 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name:
- - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
- - Apply 3 prov_max_cpu and 2 department tags to the group.
- - Limit access to a cluster for the group.
+ - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
+ - Apply 3 prov_max_cpu and 2 department tags to the group.
+ - Limit access to a cluster for the group.
community.general.manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant: my_tenant
managed_filters:
prov_max_cpu:
- - '1'
- - '2'
- - '4'
+ - '1'
+ - '2'
+ - '4'
department:
- - defense
- - engineering
+ - defense
+ - engineering
managed_filters_merge_mode: replace
belongsto_filters:
- - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
belongsto_filters_merge_mode: merge
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete a group in ManageIQ
community.general.manageiq_group:
@@ -161,53 +159,53 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
-'''
+"""
-RETURN = '''
+RETURN = r"""
group:
description: The group.
returned: success
type: complex
contains:
description:
- description: The group description
+ description: The group description.
returned: success
type: str
id:
- description: The group id
+ description: The group ID.
returned: success
type: int
group_type:
- description: The group type, system or user
+ description: The group type, system or user.
returned: success
type: str
role:
- description: The group role name
+ description: The group role name.
returned: success
type: str
tenant:
- description: The group tenant name
+ description: The group tenant name.
returned: success
type: str
managed_filters:
- description: The tag values per category
+ description: The tag values per category.
returned: success
type: dict
belongsto_filters:
- description: A list of strings with a reference to the allowed host, cluster or folder
+ description: A list of strings with a reference to the allowed host, cluster or folder.
returned: success
type: list
created_on:
- description: Group creation date
+ description: Group creation date.
returned: success
type: str
sample: "2018-08-12T08:37:55+00:00"
updated_on:
- description: Group update date
+ description: Group update date.
returned: success
type: int
sample: "2018-08-12T08:37:55+00:00"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
@@ -572,14 +570,14 @@ def main():
argument_spec = dict(
description=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
- role_id=dict(required=False, type='int'),
- role=dict(required=False, type='str'),
- tenant_id=dict(required=False, type='int'),
- tenant=dict(required=False, type='str'),
- managed_filters=dict(required=False, type='dict'),
- managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
- belongsto_filters=dict(required=False, type='list', elements='str'),
- belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ role_id=dict(type='int'),
+ role=dict(type='str'),
+ tenant_id=dict(type='int'),
+ tenant=dict(type='str'),
+ managed_filters=dict(type='dict'),
+ managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py
index f2101ad28b..247e2dc94c 100644
--- a/plugins/modules/manageiq_policies.py
+++ b/plugins/modules/manageiq_policies.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_policies
short_description: Management of resource policy_profiles in ManageIQ
@@ -21,7 +20,6 @@ extends_documentation_fragment:
author: Daniel Korn (@dkorn)
description:
- The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -33,7 +31,7 @@ options:
type: str
description:
- V(absent) - policy_profiles should not exist,
- - V(present) - policy_profiles should exist,
+ - V(present) - policy_profiles should exist.
choices: ['absent', 'present']
default: 'present'
policy_profiles:
@@ -47,9 +45,21 @@ options:
description:
- The type of the resource to which the profile should be [un]assigned.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
- 'data store', 'group', 'resource pool', 'service', 'service template',
- 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
@@ -61,9 +71,9 @@ options:
- The ID of the resource to which the profile should be [un]assigned.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
version_added: 2.2.0
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Assign new policy_profile for a provider in ManageIQ
community.general.manageiq_policies:
resource_name: 'EngLab'
@@ -74,7 +84,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Unassign a policy_profile for a provider in ManageIQ
community.general.manageiq_policies:
@@ -87,42 +97,43 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
+RETURN = r"""
manageiq_policies:
- description:
- - List current policy_profile and policies for a provider in ManageIQ
- returned: always
- type: dict
- sample: '{
- "changed": false,
- "profiles": [
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ.
+ returned: always
+ type: dict
+ sample:
+ {
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
{
- "policies": [
- {
- "active": true,
- "description": "OpenSCAP",
- "name": "openscap policy"
- },
- {
- "active": true,
- "description": "Analyse incoming container images",
- "name": "analyse incoming container images"
- },
- {
- "active": true,
- "description": "Schedule compliance after smart state analysis",
- "name": "schedule compliance after smart state analysis"
- }
- ],
- "profile_description": "OpenSCAP profile",
- "profile_name": "openscap profile"
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
}
- ]
- }'
-'''
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
@@ -136,7 +147,7 @@ def main():
resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
- state=dict(required=False, type='str',
+ state=dict(type='str',
choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py
index fda7dcadfe..2db694f11c 100644
--- a/plugins/modules/manageiq_policies_info.py
+++ b/plugins/modules/manageiq_policies_info.py
@@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_policies_info
version_added: 5.8.0
@@ -24,16 +23,27 @@ extends_documentation_fragment:
author: Alexei Znamensky (@russoz)
description:
- The manageiq_policies module supports listing policy_profiles in ManageIQ.
-
options:
resource_type:
type: str
description:
- The type of the resource to obtain the profile for.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
- 'data store', 'group', 'resource pool', 'service', 'service template',
- 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
@@ -44,9 +54,9 @@ options:
description:
- The ID of the resource to obtain the profile for.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: List current policy_profile and policies for a provider in ManageIQ
community.general.manageiq_policies_info:
resource_name: 'EngLab'
@@ -56,9 +66,9 @@ EXAMPLES = '''
username: 'admin'
password: 'smartvm'
register: result
-'''
+"""
-RETURN = '''
+RETURN = r"""
profiles:
description:
- List current policy_profile and policies for a provider in ManageIQ.
@@ -78,7 +88,7 @@ profiles:
name: schedule compliance after smart state analysis
profile_description: OpenSCAP profile
profile_name: openscap profile
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
@@ -86,8 +96,8 @@ from ansible_collections.community.general.plugins.module_utils.manageiq import
def main():
argument_spec = dict(
- resource_id=dict(required=False, type='int'),
- resource_name=dict(required=False, type='str'),
+ resource_id=dict(type='int'),
+ resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
)
diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py
index 35c73a38b3..334555c29a 100644
--- a/plugins/modules/manageiq_provider.py
+++ b/plugins/modules/manageiq_provider.py
@@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: manageiq_provider
short_description: Management of provider in ManageIQ
extends_documentation_fragment:
@@ -19,7 +19,6 @@ extends_documentation_fragment:
author: Daniel Korn (@dkorn)
description:
- The manageiq_provider module supports adding, updating, and deleting provider in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -30,7 +29,9 @@ options:
state:
type: str
description:
- - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
+ - V(absent) - provider should not exist,
+ - V(present) - provider should be present,
+ - V(refresh) - provider is refreshed.
choices: ['absent', 'present', 'refresh']
default: 'present'
name:
@@ -43,34 +44,34 @@ options:
choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
zone:
type: str
- description: The ManageIQ zone name that will manage the provider.
+ description: The ManageIQ zone name that manages the provider.
default: 'default'
provider_region:
type: str
- description: The provider region name to connect to (e.g. AWS region for Amazon).
+ description: The provider region name to connect to (for example AWS region for Amazon).
host_default_vnc_port_start:
type: str
- description: The first port in the host VNC range. defaults to None.
+ description: The first port in the host VNC range.
host_default_vnc_port_end:
type: str
- description: The last port in the host VNC range. defaults to None.
+ description: The last port in the host VNC range.
subscription:
type: str
- description: Microsoft Azure subscription ID. defaults to None.
+ description: Microsoft Azure subscription ID.
project:
type: str
- description: Google Compute Engine Project ID. defaults to None.
+ description: Google Compute Engine Project ID.
azure_tenant_id:
type: str
- description: Tenant ID. defaults to None.
- aliases: [ keystone_v3_domain_id ]
+ description: Tenant ID. Defaults to V(null).
+ aliases: [keystone_v3_domain_id]
tenant_mapping_enabled:
type: bool
default: false
- description: Whether to enable mapping of existing tenants. defaults to False.
+ description: Whether to enable mapping of existing tenants.
api_version:
type: str
- description: The OpenStack Keystone API version. defaults to None.
+ description: The OpenStack Keystone API version.
choices: ['v2', 'v3']
provider:
@@ -79,32 +80,32 @@ options:
suboptions:
hostname:
type: str
- description: The provider's api hostname.
+ description: The provider's API hostname.
required: true
port:
type: int
- description: The provider's api port.
+ description: The provider's API port.
userid:
type: str
- description: Provider's api endpoint authentication userid. defaults to None.
+ description: Provider's API endpoint authentication userid.
password:
type: str
- description: Provider's api endpoint authentication password. defaults to None.
+ description: Provider's API endpoint authentication password.
auth_key:
type: str
- description: Provider's api endpoint authentication bearer token. defaults to None.
+ description: Provider's API endpoint authentication bearer token.
validate_certs:
- description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated).
type: bool
default: true
- aliases: [ verify_ssl ]
+ aliases: [verify_ssl]
security_protocol:
type: str
- description: How SSL certificates should be used for HTTPS requests. defaults to None.
- choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests.
+ choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
certificate_authority:
type: str
- description: The CA bundle string with custom certificates. defaults to None.
+ description: The CA bundle string with custom certificates.
path:
type: str
description:
@@ -125,39 +126,38 @@ options:
type: str
description:
- TODO needs documentation.
-
metrics:
description: Metrics endpoint connection information.
type: dict
suboptions:
hostname:
type: str
- description: The provider's api hostname.
+ description: The provider's API hostname.
required: true
port:
type: int
- description: The provider's api port.
+ description: The provider's API port.
userid:
type: str
- description: Provider's api endpoint authentication userid. defaults to None.
+ description: Provider's API endpoint authentication userid.
password:
type: str
- description: Provider's api endpoint authentication password. defaults to None.
+ description: Provider's API endpoint authentication password.
auth_key:
type: str
- description: Provider's api endpoint authentication bearer token. defaults to None.
+ description: Provider's API endpoint authentication bearer token.
validate_certs:
- description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated).
type: bool
default: true
- aliases: [ verify_ssl ]
+ aliases: [verify_ssl]
security_protocol:
type: str
- choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
- description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests.
certificate_authority:
type: str
- description: The CA bundle string with custom certificates. defaults to None.
+ description: The CA bundle string with custom certificates.
path:
type: str
description: Database name for oVirt metrics. Defaults to V(ovirt_engine_history).
@@ -177,39 +177,38 @@ options:
type: str
description:
- TODO needs documentation.
-
alerts:
description: Alerts endpoint connection information.
type: dict
suboptions:
hostname:
type: str
- description: The provider's api hostname.
+ description: The provider's API hostname.
required: true
port:
type: int
- description: The provider's api port.
+ description: The provider's API port.
userid:
type: str
- description: Provider's api endpoint authentication userid. defaults to None.
+ description: Provider's API endpoint authentication userid. Defaults to V(null).
password:
type: str
- description: Provider's api endpoint authentication password. defaults to None.
+ description: Provider's API endpoint authentication password. Defaults to V(null).
auth_key:
type: str
- description: Provider's api endpoint authentication bearer token. defaults to None.
+ description: Provider's API endpoint authentication bearer token. Defaults to V(null).
validate_certs:
type: bool
- description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to V(true).
default: true
- aliases: [ verify_ssl ]
+ aliases: [verify_ssl]
security_protocol:
type: str
- choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
- description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. Defaults to V(null).
certificate_authority:
type: str
- description: The CA bundle string with custom certificates. defaults to None.
+ description: The CA bundle string with custom certificates. Defaults to V(null).
path:
type: str
description:
@@ -230,7 +229,6 @@ options:
type: str
description:
- TODO needs documentation.
-
ssh_keypair:
description: SSH key pair used for SSH connections to all hosts in this provider.
type: dict
@@ -250,10 +248,10 @@ options:
- Whether certificates should be verified for connections.
type: bool
default: true
- aliases: [ verify_ssl ]
+ aliases: [verify_ssl]
security_protocol:
type: str
- choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
description:
- TODO needs documentation.
certificate_authority:
@@ -288,9 +286,9 @@ options:
type: int
description:
- TODO needs documentation.
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new provider in ManageIQ ('Hawkular' metrics)
community.general.manageiq_provider:
name: 'EngLab'
@@ -507,10 +505,10 @@ EXAMPLES = '''
hostname: 'gce.example.com'
auth_key: 'google_json_key'
validate_certs: 'false'
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py
index 3ab5eca4f8..efd135393d 100644
--- a/plugins/modules/manageiq_tags.py
+++ b/plugins/modules/manageiq_tags.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_tags
short_description: Management of resource tags in ManageIQ
@@ -21,7 +20,6 @@ extends_documentation_fragment:
author: Daniel Korn (@dkorn)
description:
- The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -32,7 +30,7 @@ options:
state:
type: str
description:
- - V(absent) - tags should not exist.
+ - V(absent) - tags should not exist,
- V(present) - tags should exist.
choices: ['absent', 'present']
default: 'present'
@@ -47,52 +45,64 @@ options:
description:
- The relevant resource type in manageiq.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
- 'data store', 'group', 'resource pool', 'service', 'service template',
- 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
- - The name of the resource at which tags will be controlled.
+ - The name of the resource at which tags are controlled.
- Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- - The ID of the resource at which tags will be controlled.
+ - The ID of the resource at which tags are controlled.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
version_added: 2.2.0
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create new tags for a provider in ManageIQ.
community.general.manageiq_tags:
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- - category: environment
- name: prod
- - category: owner
- name: prod_ops
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when connecting to localhost!
+ validate_certs: false # only do this when connecting to localhost!
- name: Create new tags for a provider in ManageIQ.
community.general.manageiq_tags:
resource_id: 23000000790497
resource_type: 'provider'
tags:
- - category: environment
- name: prod
- - category: owner
- name: prod_ops
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when connecting to localhost!
+ validate_certs: false # only do this when connecting to localhost!
- name: Remove tags for a provider in ManageIQ.
community.general.manageiq_tags:
@@ -100,19 +110,19 @@ EXAMPLES = '''
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- - category: environment
- name: prod
- - category: owner
- name: prod_ops
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when connecting to localhost!
-'''
+ validate_certs: false # only do this when connecting to localhost!
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import (
@@ -128,7 +138,7 @@ def main():
resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
- state=dict(required=False, type='str',
+ state=dict(type='str',
choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py
index 75e111540b..2a742f69c5 100644
--- a/plugins/modules/manageiq_tags_info.py
+++ b/plugins/modules/manageiq_tags_info.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_tags_info
version_added: 5.8.0
short_description: Retrieve resource tags in ManageIQ
@@ -22,29 +21,40 @@ extends_documentation_fragment:
author: Alexei Znamensky (@russoz)
description:
- This module supports retrieving resource tags from ManageIQ.
-
options:
resource_type:
type: str
description:
- The relevant resource type in ManageIQ.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
- 'data store', 'group', 'resource pool', 'service', 'service template',
- 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
- - The name of the resource at which tags will be controlled.
+ - The name of the resource at which tags are controlled.
- Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- - The ID of the resource at which tags will be controlled.
+ - The ID of the resource at which tags are controlled.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: List current tags for a provider in ManageIQ.
community.general.manageiq_tags_info:
resource_name: 'EngLab'
@@ -54,15 +64,15 @@ EXAMPLES = '''
username: 'admin'
password: 'smartvm'
register: result
-'''
+"""
-RETURN = '''
+RETURN = r"""
tags:
description: List of tags associated with the resource.
returned: on success
type: list
elements: dict
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import (
diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py
index a5a56191e7..fda97509ce 100644
--- a/plugins/modules/manageiq_tenant.py
+++ b/plugins/modules/manageiq_tenant.py
@@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_tenant
short_description: Management of tenants in ManageIQ
@@ -31,7 +30,8 @@ options:
state:
type: str
description:
- - absent - tenant should not exist, present - tenant should be.
+ - V(absent) - tenant should not exist,
+ - V(present) - tenant should be.
choices: ['absent', 'present']
default: 'present'
name:
@@ -39,42 +39,42 @@ options:
description:
- The tenant name.
required: true
- default: null
+ default:
description:
type: str
description:
- - The tenant description.
+ - The tenant description.
required: true
- default: null
+ default:
parent_id:
type: int
description:
- - The id of the parent tenant. If not supplied the root tenant is used.
- - The O(parent_id) takes president over O(parent) when supplied
+ - The ID of the parent tenant. If not supplied the root tenant is used.
+ - The O(parent_id) takes president over O(parent) when supplied.
required: false
- default: null
+ default:
parent:
type: str
description:
- - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used.
+ - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used.
required: false
- default: null
+ default:
quotas:
type: dict
description:
- - The tenant quotas.
- - All parameters case sensitive.
- - 'Valid attributes are:'
- - ' - C(cpu_allocated) (int): use null to remove the quota.'
- - ' - C(mem_allocated) (GB): use null to remove the quota.'
- - ' - C(storage_allocated) (GB): use null to remove the quota.'
- - ' - C(vms_allocated) (int): use null to remove the quota.'
- - ' - C(templates_allocated) (int): use null to remove the quota.'
+ - The tenant quotas.
+ - All parameters case sensitive.
+ - 'Valid attributes are:'
+ - '- V(cpu_allocated) (int): use null to remove the quota.'
+ - '- V(mem_allocated) (GB): use null to remove the quota.'
+ - '- V(storage_allocated) (GB): use null to remove the quota.'
+ - '- V(vms_allocated) (int): use null to remove the quota.'
+ - '- V(templates_allocated) (int): use null to remove the quota.'
required: false
default: {}
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Update the root tenant in ManageIQ
community.general.manageiq_tenant:
name: 'My Company'
@@ -83,7 +83,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Create a tenant in ManageIQ
community.general.manageiq_tenant:
@@ -94,7 +94,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete a tenant in ManageIQ
community.general.manageiq_tenant:
@@ -105,7 +105,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
community.general.manageiq_tenant:
@@ -114,12 +114,12 @@ EXAMPLES = '''
quotas:
- cpu_allocated: 100
- mem_allocated: 50
- - vms_allocated: null
+ - vms_allocated:
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete a tenant in ManageIQ using a token
@@ -130,39 +130,39 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
+RETURN = r"""
tenant:
description: The tenant.
returned: success
type: complex
contains:
id:
- description: The tenant id
+ description: The tenant ID.
returned: success
type: int
name:
- description: The tenant name
+ description: The tenant name.
returned: success
type: str
description:
- description: The tenant description
+ description: The tenant description.
returned: success
type: str
parent_id:
- description: The id of the parent tenant
+ description: The ID of the parent tenant.
returned: success
type: int
quotas:
- description: List of tenant quotas
+ description: List of tenant quotas.
returned: success
type: list
sample:
cpu_allocated: 100
mem_allocated: 50
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
@@ -482,8 +482,8 @@ def main():
argument_spec = dict(
name=dict(required=True, type='str'),
description=dict(required=True, type='str'),
- parent_id=dict(required=False, type='int'),
- parent=dict(required=False, type='str'),
+ parent_id=dict(type='int'),
+ parent=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
quotas=dict(type='dict', default={})
)
diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py
index 0d8a81984f..475086c823 100644
--- a/plugins/modules/manageiq_user.py
+++ b/plugins/modules/manageiq_user.py
@@ -8,8 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: manageiq_user
short_description: Management of users in ManageIQ
@@ -20,7 +19,6 @@ extends_documentation_fragment:
author: Daniel Korn (@dkorn)
description:
- The manageiq_user module supports adding, updating and deleting users in ManageIQ.
-
attributes:
check_mode:
support: none
@@ -31,7 +29,8 @@ options:
state:
type: str
description:
- - absent - user should not exist, present - user should be.
+ - V(absent) - user should not exist,
+ - V(present) - user should be.
choices: ['absent', 'present']
default: 'present'
userid:
@@ -60,10 +59,11 @@ options:
default: always
choices: ['always', 'on_create']
description:
- - V(always) will update passwords unconditionally. V(on_create) will only set the password for a newly created user.
-'''
+ - V(always) updates passwords unconditionally.
+ - V(on_create) only sets the password for a newly created user.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new user in ManageIQ
community.general.manageiq_user:
userid: 'jdoe'
@@ -75,7 +75,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Create a new user in ManageIQ using a token
community.general.manageiq_user:
@@ -87,7 +87,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete a user in ManageIQ
community.general.manageiq_user:
@@ -97,7 +97,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Delete a user in ManageIQ using a token
community.general.manageiq_user:
@@ -106,7 +106,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Update email of user in ManageIQ
community.general.manageiq_user:
@@ -116,7 +116,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false # only do this when you trust the network!
+ validate_certs: false # only do this when you trust the network!
- name: Update email of user in ManageIQ using a token
community.general.manageiq_user:
@@ -125,11 +125,11 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false # only do this when you trust the network!
-'''
+ validate_certs: false # only do this when you trust the network!
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
diff --git a/plugins/modules/mas.py b/plugins/modules/mas.py
index 8bb80840ca..3659c97636 100644
--- a/plugins/modules/mas.py
+++ b/plugins/modules/mas.py
@@ -10,54 +10,54 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- - Michael Heap (@mheap)
- - Lukas Bestle (@lukasbestle)
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- id:
- description:
- - The Mac App Store identifier of the app(s) you want to manage.
- - This can be found by running C(mas search APP_NAME) on your machine.
- type: list
- elements: int
- state:
- description:
- - Desired state of the app installation.
- - The V(absent) value requires root permissions, also see the examples.
- type: str
- choices:
- - absent
- - latest
- - present
- default: present
- upgrade_all:
- description:
- - Upgrade all installed Mac App Store apps.
- type: bool
- default: false
- aliases: ["upgrade"]
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+ - The V(absent) value requires root permissions, also see the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: false
+ aliases: ["upgrade"]
requirements:
- - macOS 10.11+
- - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
- - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+.
- - Users need to sign in via the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417).
-'''
+ - macOS 10.11 or higher.
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+ - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+.
+ - Users need to sign in to the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install Keynote
community.general.mas:
id: 409183694
@@ -99,9 +99,9 @@ EXAMPLES = '''
id: 413857545
state: absent
become: true # Uninstallation requires root permissions
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
import os
diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py
index 0b419c8d93..fb6c797bff 100644
--- a/plugins/modules/matrix.py
+++ b/plugins/modules/matrix.py
@@ -8,58 +8,57 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
author: "Jan Christian Grünhage (@jcgruenhage)"
module: matrix
short_description: Send notifications to matrix
description:
- - This module sends html formatted notifications to matrix rooms.
+ - This module sends HTML formatted notifications to matrix rooms.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- msg_plain:
- type: str
- description:
- - Plain text form of the message to send to matrix, usually markdown
- required: true
- msg_html:
- type: str
- description:
- - HTML form of the message to send to matrix
- required: true
- room_id:
- type: str
- description:
- - ID of the room to send the notification to
- required: true
- hs_url:
- type: str
- description:
- - URL of the homeserver, where the CS-API is reachable
- required: true
- token:
- type: str
- description:
- - Authentication token for the API call. If provided, user_id and password are not required
- user_id:
- type: str
- description:
- - The user id of the user
- password:
- type: str
- description:
- - The password to log in with
+ msg_plain:
+ type: str
+ description:
+ - Plain text form of the message to send to matrix, usually markdown.
+ required: true
+ msg_html:
+ type: str
+ description:
+ - HTML form of the message to send to matrix.
+ required: true
+ room_id:
+ type: str
+ description:
+ - ID of the room to send the notification to.
+ required: true
+ hs_url:
+ type: str
+ description:
+ - URL of the homeserver, where the CS-API is reachable.
+ required: true
+ token:
+ type: str
+ description:
+ - Authentication token for the API call. If provided, O(user_id) and O(password) are not required.
+ user_id:
+ type: str
+ description:
+ - The user ID of the user.
+ password:
+ type: str
+ description:
+ - The password to log in with.
requirements:
- - matrix-client (Python library)
-'''
+ - matrix-client (Python library)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Send matrix notification with token
community.general.matrix:
msg_plain: "**hello world**"
@@ -76,10 +75,10 @@ EXAMPLES = '''
hs_url: "https://matrix.org"
user_id: "ansible_notification_bot"
password: "{{ matrix_auth_password }}"
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -100,9 +99,9 @@ def run_module():
msg_html=dict(type='str', required=True),
room_id=dict(type='str', required=True),
hs_url=dict(type='str', required=True),
- token=dict(type='str', required=False, no_log=True),
- user_id=dict(type='str', required=False),
- password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', no_log=True),
+ user_id=dict(type='str'),
+ password=dict(type='str', no_log=True),
)
result = dict(
diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py
index 154040a8fd..4cb32c1f3b 100644
--- a/plugins/modules/mattermost.py
+++ b/plugins/modules/mattermost.py
@@ -15,14 +15,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: mattermost
short_description: Send Mattermost notifications
description:
- - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+ - Sends notifications to U(http://your.mattermost.url) using the Incoming WebHook integration.
author: "Benjamin Jolivot (@bjolivot)"
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -32,15 +32,13 @@ options:
url:
type: str
description:
- - Mattermost url (i.e. http://mattermost.yourcompany.com).
+ - Mattermost URL (for example V(http://mattermost.yourcompany.com)).
required: true
api_key:
type: str
description:
- - Mattermost webhook api key. Log into your mattermost site, go to
- Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
- This will give you full URL. O(api_key) is the last part.
- http://mattermost.example.com/hooks/C(API_KEY)
+ - Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming
+ Webhook. This gives you a full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY).
required: true
text:
type: str
@@ -62,22 +60,28 @@ options:
username:
type: str
description:
- - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc.
+ - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc).
default: Ansible
icon_url:
type: str
description:
- URL for the message sender's icon.
default: https://docs.ansible.com/favicon.ico
+ priority:
+ type: str
+ description:
+ - Set a priority for the message.
+ choices: [important, urgent]
+ version_added: 10.0.0
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
default: true
type: bool
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Send notification message via Mattermost
community.general.mattermost:
url: http://mattermost.example.com
@@ -92,6 +96,7 @@ EXAMPLES = """
channel: notifications
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
+ priority: important
- name: Send attachments message via Mattermost
community.general.mattermost:
@@ -110,16 +115,16 @@ EXAMPLES = """
short: true
"""
-RETURN = '''
+RETURN = r"""
payload:
- description: Mattermost payload
- returned: success
- type: str
+ description: Mattermost payload.
+ returned: success
+ type: str
webhook_url:
- description: URL the webhook is sent to
- returned: success
- type: str
-'''
+ description: URL the webhook is sent to.
+ returned: success
+ type: str
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -132,9 +137,10 @@ def main():
url=dict(type='str', required=True),
api_key=dict(type='str', required=True, no_log=True),
text=dict(type='str'),
- channel=dict(type='str', default=None),
+ channel=dict(type='str'),
username=dict(type='str', default='Ansible'),
icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+ priority=dict(type='str', choices=['important', 'urgent']),
validate_certs=dict(default=True, type='bool'),
attachments=dict(type='list', elements='dict'),
),
@@ -154,6 +160,8 @@ def main():
for param in ['text', 'channel', 'username', 'icon_url', 'attachments']:
if module.params[param] is not None:
payload[param] = module.params[param]
+ if module.params['priority'] is not None:
+ payload['priority'] = {'priority': module.params['priority']}
payload = module.jsonify(payload)
result['payload'] = payload
diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py
index e239b4a164..af3be70f39 100644
--- a/plugins/modules/maven_artifact.py
+++ b/plugins/modules/maven_artifact.py
@@ -11,171 +11,168 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
description:
- - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
- - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
- version if one is not available.
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+ - Can retrieve snapshots or release versions of the artifact and resolve the latest available version if one is not available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- - lxml
- - boto if using a S3 repository (V(s3://...))
+ - lxml
+ - boto if using a S3 repository (V(s3://...))
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- group_id:
- type: str
- description:
- - The Maven groupId coordinate.
- required: true
- artifact_id:
- type: str
- description:
- - The maven artifactId coordinate.
- required: true
- version:
- type: str
- description:
- - The maven version coordinate.
- - Mutually exclusive with O(version_by_spec).
- version_by_spec:
- type: str
- description:
- - The maven dependency version ranges.
- - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
- - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported.
- - Mutually exclusive with O(version).
- version_added: '0.2.0'
- classifier:
- type: str
- description:
- - The maven classifier coordinate.
- default: ''
- extension:
- type: str
- description:
- - The maven type/extension coordinate.
- default: jar
- repository_url:
- type: str
- description:
- - The URL of the Maven Repository to download from.
- - Use V(s3://...) if the repository is hosted on Amazon S3.
- - Use V(file://...) if the repository is local.
- default: https://repo1.maven.org/maven2
- username:
- type: str
- description:
- - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3.
- aliases: [ "aws_secret_key" ]
- password:
- type: str
- description:
- - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3.
- aliases: [ "aws_secret_access_key" ]
- headers:
- description:
- - Add custom HTTP headers to a request in hash/dict format.
- type: dict
- force_basic_auth:
- description:
- - httplib2, the library used by the uri module only sends authentication information when a webservice
- responds to an initial request with a 401 status. Since some basic auth services do not properly
- send a 401, logins will fail. This option forces the sending of the Basic authentication header
- upon initial request.
- default: false
- type: bool
- version_added: '0.2.0'
- dest:
- type: path
- description:
- - The path where the artifact should be written to.
- - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file.
- required: true
- state:
- type: str
- description:
- - The desired state of the artifact.
- default: present
- choices: [present,absent]
- timeout:
- type: int
- description:
- - Specifies a timeout in seconds for the connection attempt.
- default: 10
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists.
- type: bool
- default: true
- client_cert:
- description:
- - PEM formatted certificate chain file to be used for SSL client authentication.
- - This file can also include the key as well, and if the key is included, O(client_key) is not required.
- type: path
- version_added: '1.3.0'
- client_key:
- description:
- - PEM formatted file that contains your private key to be used for SSL client authentication.
- - If O(client_cert) contains both the certificate and key, this option is not required.
- type: path
- version_added: '1.3.0'
- keep_name:
- description:
- - If V(true), the downloaded artifact's name is preserved, i.e the version number remains part of it.
- - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec)
- is defined.
- type: bool
- default: false
- verify_checksum:
- type: str
- description:
- - If V(never), the MD5/SHA1 checksum will never be downloaded and verified.
- - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
- - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist,
- to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe)
- downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
- if the artifact has not been cached yet, it may fail unexpectedly.
- If you still need it, you should consider using V(always) instead - if you deal with a checksum, it is better to
- use it to verify integrity after download.
- - V(always) combines V(download) and V(change).
- required: false
- default: 'download'
- choices: ['never', 'download', 'change', 'always']
- checksum_alg:
- type: str
- description:
- - If V(md5), checksums will use the MD5 algorithm. This is the default.
- - If V(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use
- FIPS-compliant algorithms, since MD5 will be blocked on such systems.
- default: 'md5'
- choices: ['md5', 'sha1']
- version_added: 3.2.0
- unredirected_headers:
- type: list
- elements: str
- version_added: 5.2.0
- description:
- - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function.
- - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]).
- - Useful if the redirection URL does not need to have sensitive headers in the request.
- - Requires ansible-core version 2.12 or later.
- directory_mode:
- type: str
- description:
- - Filesystem permission mode applied recursively to O(dest) when it is a directory.
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate.
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate.
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate.
+ - Mutually exclusive with O(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution).
+ - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported.
+ - Mutually exclusive with O(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate.
+ default: ''
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate.
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use V(s3://...) if the repository is hosted on Amazon S3.
+ - Use V(file://...) if the repository is local.
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+ - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3.
+ aliases: ["aws_secret_key"]
+ password:
+ type: str
+ description:
+ - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on
+ S3.
+ aliases: ["aws_secret_access_key"]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+ - C(httplib2), the library used by the URI module only sends authentication information when a webservice responds to
+ an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins fail. This
+ option forces the sending of the Basic authentication header upon initial request.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+ - The path where the artifact should be written to.
+ - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact.
+ default: present
+ choices: [present, absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt.
+ default: 10
+ validate_certs:
+ description:
+ - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, O(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If O(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+ - If V(true), the downloaded artifact's name is preserved, in other words the version number remains part of it.
+ - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is
+ defined.
+ type: bool
+ default: false
+ verify_checksum:
+ type: str
+ description:
+ - If V(never), the MD5/SHA1 checksum is never downloaded and verified.
+ - If V(download), the MD5/SHA1 checksum is downloaded and verified only after artifact download. This is the default.
+ - If V(change), the MD5/SHA1 checksum is downloaded and verified if the destination already exist, to verify if they
+ are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact,
+ and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been
+ cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead - if you deal
+ with a checksum, it is better to use it to verify integrity after download.
+ - V(always) combines V(download) and V(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+ checksum_alg:
+ type: str
+ description:
+ - If V(md5), checksums use the MD5 algorithm. This is the default.
+ - If V(sha1), checksums use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms,
+ since MD5 is blocked on such systems.
+ default: 'md5'
+ choices: ['md5', 'sha1']
+ version_added: 3.2.0
+ unredirected_headers:
+ type: list
+ elements: str
+ version_added: 5.2.0
+ description:
+ - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function.
+ - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]).
+ - Useful if the redirection URL does not need to have sensitive headers in the request.
+ - Requires ansible-core version 2.12 or later.
+ directory_mode:
+ type: str
+ description:
+ - Filesystem permission mode applied recursively to O(dest) when it is a directory.
extends_documentation_fragment:
- - ansible.builtin.files
- - community.general.attributes
-'''
+ - ansible.builtin.files
+ - community.general.attributes
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Download the latest version of the JUnit framework artifact from Maven Central
community.general.maven_artifact:
group_id: junit
@@ -236,7 +233,7 @@ EXAMPLES = '''
artifact_id: junit
version_by_spec: "[3.8,4.0)"
dest: /tmp/
-'''
+"""
import hashlib
import os
@@ -247,7 +244,6 @@ import tempfile
import traceback
import re
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.ansible_release import __version__ as ansible_version
from re import match
@@ -625,35 +621,32 @@ def main():
argument_spec=dict(
group_id=dict(required=True),
artifact_id=dict(required=True),
- version=dict(default=None),
- version_by_spec=dict(default=None),
+ version=dict(),
+ version_by_spec=dict(),
classifier=dict(default=''),
extension=dict(default='jar'),
repository_url=dict(default='https://repo1.maven.org/maven2'),
- username=dict(default=None, aliases=['aws_secret_key']),
- password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ username=dict(aliases=['aws_secret_key']),
+ password=dict(no_log=True, aliases=['aws_secret_access_key']),
headers=dict(type='dict'),
force_basic_auth=dict(default=False, type='bool'),
state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
timeout=dict(default=10, type='int'),
dest=dict(type="path", required=True),
- validate_certs=dict(required=False, default=True, type='bool'),
- client_cert=dict(type="path", required=False),
- client_key=dict(type="path", required=False),
- keep_name=dict(required=False, default=False, type='bool'),
- verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
- checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']),
- unredirected_headers=dict(type='list', elements='str', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ client_cert=dict(type="path"),
+ client_key=dict(type="path"),
+ keep_name=dict(default=False, type='bool'),
+ verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']),
+ checksum_alg=dict(default='md5', choices=['md5', 'sha1']),
+ unredirected_headers=dict(type='list', elements='str'),
directory_mode=dict(type='str'),
),
add_file_common_args=True,
mutually_exclusive=([('version', 'version_by_spec')])
)
- if LooseVersion(ansible_version) < LooseVersion("2.12") and module.params['unredirected_headers']:
- module.fail_json(msg="Unredirected Headers parameter provided, but your ansible-core version does not support it. Minimum version is 2.12")
-
- if LooseVersion(ansible_version) >= LooseVersion("2.12") and module.params['unredirected_headers'] is None:
+ if module.params['unredirected_headers'] is None:
# if the user did not supply unredirected params, we use the default, ONLY on ansible core 2.12 and above
module.params['unredirected_headers'] = ['Authorization', 'Cookie']
diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py
index 8cff51ade1..cb8ebe9191 100644
--- a/plugins/modules/memset_dns_reload.py
+++ b/plugins/modules/memset_dns_reload.py
@@ -8,53 +8,48 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_dns_reload
author: "Simon Weald (@glitchcrab)"
short_description: Request reload of Memset's DNS infrastructure,
notes:
- - DNS reload requests are a best-effort service provided by Memset; these generally
- happen every 15 minutes by default, however you can request an immediate reload if
- later tasks rely on the records being created. An API key generated via the
- Memset customer control panel is required with the following minimum scope -
- C(dns.reload). If you wish to poll the job status to wait until the reload has
- completed, then C(job.status) is also required.
+ - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default,
+ however you can request an immediate reload if later tasks rely on the records being created. An API key generated using
+ the Memset customer control panel is required with the following minimum scope - C(dns.reload). If you wish to poll the
+ job status to wait until the reload has completed, then C(job.status) is also required.
description:
- Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- poll:
- default: false
- type: bool
- description:
- - Boolean value, if set will poll the reload job's status and return
- when the job has completed (unless the 30 second timeout is reached first).
- If the timeout is reached then the task will not be marked as failed, but
- stderr will indicate that the polling failed.
-'''
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - If V(true), it polls the reload job's status and returns when the job has completed (unless the 30 second timeout is
+ reached first). If the timeout is reached then the task does not return as failed, but stderr indicates that the polling
+ failed.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Submit DNS reload and poll
community.general.memset_dns_reload:
api_key: 5eb86c9196ab03919abcf03857163741
poll: true
delegate_to: localhost
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
memset_api:
description: Raw response from the Memset API.
returned: always
@@ -85,7 +80,7 @@ memset_api:
returned: always
type: str
sample: "dns"
-'''
+"""
from time import sleep
@@ -172,7 +167,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, type='str', no_log=True),
- poll=dict(required=False, default=False, type='bool')
+ poll=dict(default=False, type='bool')
),
supports_check_mode=False
)
diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py
index 5dfd1f956a..e9f2699812 100644
--- a/plugins/modules/memset_memstore_info.py
+++ b/plugins/modules/memset_memstore_info.py
@@ -8,107 +8,104 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_memstore_info
author: "Simon Weald (@glitchcrab)"
short_description: Retrieve Memstore product usage information
notes:
- - An API key generated via the Memset customer control panel is needed with the
- following minimum scope - C(memstore.usage).
+ - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(memstore.usage).
description:
- - Retrieve Memstore product usage information.
+ - Retrieve Memstore product usage information.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.attributes
+ - community.general.attributes.info_module
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- name:
- required: true
- type: str
- description:
- - The Memstore product name (that is, C(mstestyaa1)).
-'''
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The Memstore product name (that is, V(mstestyaa1)).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get usage for mstestyaa1
community.general.memset_memstore_info:
name: mstestyaa1
api_key: 5eb86c9896ab03919abcf03857163741
delegate_to: localhost
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
memset_api:
- description: Info from the Memset API
+ description: Info from the Memset API.
returned: always
type: complex
contains:
cdn_bandwidth:
- description: Dictionary of CDN bandwidth facts
+ description: Dictionary of CDN bandwidth facts.
returned: always
type: complex
contains:
bytes_out:
- description: Outbound CDN bandwidth for the last 24 hours in bytes
+ description: Outbound CDN bandwidth for the last 24 hours in bytes.
returned: always
type: int
sample: 1000
requests:
- description: Number of requests in the last 24 hours
+ description: Number of requests in the last 24 hours.
returned: always
type: int
sample: 10
bytes_in:
- description: Inbound CDN bandwidth for the last 24 hours in bytes
+ description: Inbound CDN bandwidth for the last 24 hours in bytes.
returned: always
type: int
sample: 1000
containers:
- description: Number of containers
+ description: Number of containers.
returned: always
type: int
sample: 10
bytes:
- description: Space used in bytes
+ description: Space used in bytes.
returned: always
type: int
sample: 3860997965
objs:
- description: Number of objects
+ description: Number of objects.
returned: always
type: int
sample: 1000
bandwidth:
- description: Dictionary of CDN bandwidth facts
+ description: Dictionary of CDN bandwidth facts.
returned: always
type: complex
contains:
bytes_out:
- description: Outbound bandwidth for the last 24 hours in bytes
+ description: Outbound bandwidth for the last 24 hours in bytes.
returned: always
type: int
sample: 1000
requests:
- description: Number of requests in the last 24 hours
+ description: Number of requests in the last 24 hours.
returned: always
type: int
sample: 10
bytes_in:
- description: Inbound bandwidth for the last 24 hours in bytes
+ description: Inbound bandwidth for the last 24 hours in bytes.
returned: always
type: int
sample: 1000
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py
index 40862ae944..59d395a161 100644
--- a/plugins/modules/memset_server_info.py
+++ b/plugins/modules/memset_server_info.py
@@ -8,48 +8,45 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_server_info
author: "Simon Weald (@glitchcrab)"
short_description: Retrieve server information
notes:
- - An API key generated via the Memset customer control panel is needed with the
- following minimum scope - C(server.info).
+ - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(server.info).
description:
- - Retrieve server information.
+ - Retrieve server information.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.attributes
+ - community.general.attributes.info_module
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- name:
- required: true
- type: str
- description:
- - The server product name (that is, C(testyaa1)).
-'''
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The server product name (that is, C(testyaa1)).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get details for testyaa1
community.general.memset_server_info:
name: testyaa1
api_key: 5eb86c9896ab03919abcf03857163741
delegate_to: localhost
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
memset_api:
- description: Info from the Memset API
+ description: Info from the Memset API.
returned: always
type: complex
contains:
@@ -59,7 +56,7 @@ memset_api:
type: bool
sample: true
control_panel:
- description: Whether the server has a control panel (i.e. cPanel).
+ description: Whether the server has a control panel (for example cPanel).
returned: always
type: str
sample: 'cpanel'
@@ -77,33 +74,34 @@ memset_api:
description: Details about the firewall group this server is in.
returned: always
type: dict
- sample: {
- "default_outbound_policy": "RETURN",
- "name": "testyaa-fw1",
- "nickname": "testyaa cPanel rules",
- "notes": "",
- "public": false,
- "rules": {
- "51d7db54d39c3544ef7c48baa0b9944f": {
- "action": "ACCEPT",
- "comment": "",
- "dest_ip6s": "any",
- "dest_ips": "any",
- "dest_ports": "any",
- "direction": "Inbound",
- "ip_version": "any",
- "ordering": 2,
- "protocols": "icmp",
- "rule_group_name": "testyaa-fw1",
- "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
- "source_ip6s": "any",
- "source_ips": "any",
- "source_ports": "any"
+ sample:
+ {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
}
}
- }
firewall_type:
- description: The type of firewall the server has (i.e. self-managed, managed).
+ description: The type of firewall the server has (for example self-managed, managed).
returned: always
type: str
sample: 'managed'
@@ -113,7 +111,7 @@ memset_api:
type: str
sample: 'testyaa1.miniserver.com'
ignore_monitoring_off:
- description: When true, Memset won't remind the customer that monitoring is disabled.
+ description: When true, Memset does not remind the customer that monitoring is disabled.
returned: always
type: bool
sample: true
@@ -121,22 +119,23 @@ memset_api:
description: List of dictionaries of all IP addresses assigned to the server.
returned: always
type: list
- sample: [
- {
- "address": "1.2.3.4",
- "bytes_in_today": 1000.0,
- "bytes_in_yesterday": 2000.0,
- "bytes_out_today": 1000.0,
- "bytes_out_yesterday": 2000.0
- }
- ]
+ sample:
+ [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
monitor:
description: Whether the server has monitoring enabled.
returned: always
type: bool
sample: true
monitoring_level:
- description: The server's monitoring level (i.e. basic).
+ description: The server's monitoring level (for example V(basic)).
returned: always
type: str
sample: 'basic'
@@ -149,7 +148,7 @@ memset_api:
description: The network zone(s) the server is in.
returned: always
type: list
- sample: [ 'reading' ]
+ sample: ["reading"]
nickname:
description: Customer-set nickname for the server.
returned: always
@@ -196,7 +195,7 @@ memset_api:
type: str
sample: 'GBP'
renewal_price_vat:
- description: VAT rate for renewal payments
+ description: VAT rate for renewal payments.
returned: always
type: str
sample: '20'
@@ -206,7 +205,7 @@ memset_api:
type: str
sample: '2013-04-10'
status:
- description: Current status of the server (i.e. live, onhold).
+ description: Current status of the server (for example live, onhold).
returned: always
type: str
sample: 'LIVE'
@@ -216,7 +215,7 @@ memset_api:
type: str
sample: 'managed'
type:
- description: What this server is (i.e. dedicated)
+ description: What this server is (for example V(dedicated)).
returned: always
type: str
sample: 'miniserver'
@@ -224,16 +223,20 @@ memset_api:
description: Dictionary of tagged and untagged VLANs this server is in.
returned: always
type: dict
- sample: {
- tagged: [],
- untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
- }
+ sample:
+ {
+ "tagged": [],
+ "untagged": [
+ "testyaa-vlan1",
+ "testyaa-vlan2"
+ ]
+ }
vulnscan:
description: Vulnerability scanning level.
returned: always
type: str
sample: 'basic'
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py
index e405ad3e86..553328909d 100644
--- a/plugins/modules/memset_zone.py
+++ b/plugins/modules/memset_zone.py
@@ -8,60 +8,56 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_zone
author: "Simon Weald (@glitchcrab)"
short_description: Creates and deletes Memset DNS zones
notes:
- - Zones can be thought of as a logical group of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
+ - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point
+ to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope
+ - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
description:
- Manage DNS zones in a Memset account.
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- state:
- required: true
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- name:
- required: true
- description:
- - The zone nickname; usually the same as the main domain. Ensure this
- value has at most 250 characters.
- type: str
- aliases: [ nickname ]
- ttl:
- description:
- - The default TTL for all records created in the zone. This must be a
- valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
- type: int
- default: 0
- choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
- force:
- required: false
- default: false
- type: bool
- description:
- - Forces deletion of a zone and all zone domains/zone records it contains.
-'''
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [absent, present]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this value has at most 250 characters.
+ type: str
+ aliases: [nickname]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ default: 0
+ choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create the zone 'test'
- name: Create zone
community.general.memset_zone:
@@ -79,40 +75,40 @@ EXAMPLES = '''
api_key: 5eb86c9196ab03919abcf03857163741
force: true
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
memset_api:
- description: Zone info from the Memset API
+ description: Zone info from the Memset API.
returned: when state == present
type: complex
contains:
domains:
- description: List of domains in this zone
+ description: List of domains in this zone.
returned: always
type: list
sample: []
id:
- description: Zone id
+ description: Zone ID.
returned: always
type: str
sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
nickname:
- description: Zone name
+ description: Zone name.
returned: always
type: str
sample: "example.com"
records:
- description: List of DNS records for domains in this zone
+ description: List of DNS records for domains in this zone.
returned: always
type: list
sample: []
ttl:
- description: Default TTL for domains in this zone
+ description: Default TTL for domains in this zone.
returned: always
type: int
sample: 300
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.memset import check_zone
@@ -293,8 +289,8 @@ def main():
state=dict(required=True, choices=['present', 'absent'], type='str'),
api_key=dict(required=True, type='str', no_log=True),
name=dict(required=True, aliases=['nickname'], type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- force=dict(required=False, default=False, type='bool')
+ ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(default=False, type='bool')
),
supports_check_mode=True
)
diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py
index 7443e6c256..6e4dd27320 100644
--- a/plugins/modules/memset_zone_domain.py
+++ b/plugins/modules/memset_zone_domain.py
@@ -8,53 +8,50 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_zone_domain
author: "Simon Weald (@glitchcrab)"
short_description: Create and delete domains in Memset DNS zones
notes:
- - Zone domains can be thought of as a collection of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list).
- - Currently this module can only create one domain at a time. Multiple domains should
- be created using C(loop).
+ - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they
+ point to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum
+ scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should be created using C(loop).
description:
- Manage DNS zone domains in a Memset account.
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- state:
- default: present
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- domain:
- required: true
- description:
- - The zone domain name. Ensure this value has at most 250 characters.
- type: str
- aliases: ['name']
- zone:
- required: true
- description:
- - The zone to add the domain to (this must already exist).
- type: str
-'''
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [absent, present]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create the zone domain 'test.com'
- name: Create zone domain
community.general.memset_zone_domain:
@@ -63,25 +60,25 @@ EXAMPLES = '''
state: present
api_key: 5eb86c9196ab03919abcf03857163741
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
memset_api:
- description: Domain info from the Memset API
+ description: Domain info from the Memset API.
returned: when changed or state == present
type: complex
contains:
domain:
- description: Domain name
+ description: Domain name.
returned: always
type: str
sample: "example.com"
id:
- description: Domain ID
+ description: Domain ID.
returned: always
type: str
sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
@@ -256,7 +253,7 @@ def main():
retvals = create_or_delete_domain(args)
# we would need to populate the return values with the API's response
- # in several places so it's easier to do it at the end instead.
+ # in several places so it is easier to do it at the end instead.
if not retvals['failed']:
if args['state'] == 'present' and not module.check_mode:
payload = dict()
diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py
index 349240b84e..fd87c35fa0 100644
--- a/plugins/modules/memset_zone_record.py
+++ b/plugins/modules/memset_zone_record.py
@@ -8,83 +8,79 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: memset_zone_record
author: "Simon Weald (@glitchcrab)"
short_description: Create and delete records in Memset DNS zones
notes:
- - Zones can be thought of as a logical group of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
- - Currently this module can only create one DNS record at a time. Multiple records
- should be created using C(loop).
+ - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point
+ to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope
+ - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records should be created using C(loop).
description:
- Manage DNS records in a Memset account.
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- state:
- default: present
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- address:
- required: true
- description:
- - The address for this record (can be IP or text string depending on record type).
- type: str
- aliases: [ ip, data ]
- priority:
- description:
- - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive).
- type: int
- default: 0
- record:
- required: false
- description:
- - The subdomain to create.
- type: str
- default: ''
- type:
- required: true
- description:
- - The type of DNS record to create.
- choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
- type: str
- relative:
- type: bool
- default: false
- description:
- - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
- and C(SRV)record types.
- ttl:
- description:
- - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
- valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
- default: 0
- choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
- type: int
- zone:
- required: true
- description:
- - The name of the zone to which to add the record to.
- type: str
-'''
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [absent, present]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ip, data]
+ priority:
+ description:
+ - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive).
+ type: int
+ default: 0
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ default: ''
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [A, AAAA, CNAME, MX, NS, SRV, TXT]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+ - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (inherits the zone's TTL if not explicitly set). This must be a valid int from
+ U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ default: 0
+ choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400]
+ type: int
+ zone:
+ required: true
+ description:
+ - The name of the zone to which to add the record.
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create DNS record for www.domain.com
- name: Create DNS record
community.general.memset_zone_record:
@@ -118,11 +114,11 @@ EXAMPLES = '''
address: "{{ item.address }}"
delegate_to: localhost
with_items:
- - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
- - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
-'''
+ - {'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4'}
+ - {'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1'}
+"""
-RETURN = '''
+RETURN = r"""
memset_api:
description: Record info from the Memset API.
returned: when state == present
@@ -168,7 +164,7 @@ memset_api:
returned: always
type: str
sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
@@ -360,15 +356,15 @@ def main():
global module
module = AnsibleModule(
argument_spec=dict(
- state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
api_key=dict(required=True, type='str', no_log=True),
zone=dict(required=True, type='str'),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
address=dict(required=True, aliases=['ip', 'data'], type='str'),
- record=dict(required=False, default='', type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- priority=dict(required=False, default=0, type='int'),
- relative=dict(required=False, default=False, type='bool')
+ record=dict(default='', type='str'),
+ ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(default=0, type='int'),
+ relative=dict(default=False, type='bool')
),
supports_check_mode=True
)
diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py
index d1f49ca82e..7e188ec844 100644
--- a/plugins/modules/mksysb.py
+++ b/plugins/modules/mksysb.py
@@ -10,20 +10,19 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
author: Kairo Araujo (@kairoaraujo)
module: mksysb
short_description: Generates AIX mksysb rootvg backups
description:
-- This module manages a basic AIX mksysb (image) of rootvg.
+ - This module manages a basic AIX mksysb (image) of rootvg.
seealso:
-- name: C(mksysb) command manual page
- description: Manual page for the command.
- link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command
+ - name: C(mksysb) command manual page
+ description: Manual page for the command.
+ link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command
extends_documentation_fragment:
-- community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -32,63 +31,62 @@ attributes:
options:
backup_crypt_files:
description:
- - Backup encrypted files.
+ - Backup encrypted files.
type: bool
default: true
backup_dmapi_fs:
description:
- - Back up DMAPI filesystem files.
+ - Back up DMAPI filesystem files.
type: bool
default: true
create_map_files:
description:
- - Creates a new MAP files.
+ - Creates a new MAP files.
type: bool
default: false
exclude_files:
description:
- - Excludes files using C(/etc/rootvg.exclude).
+ - Excludes files using C(/etc/rootvg.exclude).
type: bool
default: false
exclude_wpar_files:
description:
- - Excludes WPAR files.
+ - Excludes WPAR files.
type: bool
default: false
extended_attrs:
description:
- - Backup extended attributes.
+ - Backup extended attributes.
type: bool
default: true
name:
type: str
description:
- - Backup name
+ - Backup name.
required: true
new_image_data:
description:
- - Creates a new file data.
+ - Creates a new file data.
type: bool
default: true
software_packing:
description:
- - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg).
+ - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg).
type: bool
default: false
storage_path:
type: str
description:
- - Storage path where the mksysb will stored.
+ - Storage path where the mksysb backup is stored.
required: true
use_snapshot:
description:
- - Creates backup using snapshots.
+ - Creates backup using snapshots.
type: bool
default: false
"""
-EXAMPLES = """
----
+EXAMPLES = r"""
- name: Running a backup image mksysb
community.general.mksysb:
name: myserver
@@ -97,12 +95,7 @@ EXAMPLES = """
exclude_wpar_files: true
"""
-RETURN = """
----
-changed:
- description: Return changed for mksysb actions as true or false.
- returned: always
- type: bool
+RETURN = r"""
msg:
description: Return message regarding the action.
returned: always
@@ -144,7 +137,6 @@ class MkSysB(ModuleHelper):
backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"),
combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])),
)
- use_old_vardict = False
def __init_module__(self):
if not os.path.isdir(self.vars.storage_path):
diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py
index 3d6a7c2410..d5bb6fddbb 100644
--- a/plugins/modules/modprobe.py
+++ b/plugins/modules/modprobe.py
@@ -8,59 +8,60 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: modprobe
short_description: Load or unload kernel modules
author:
- - David Stygstra (@stygstra)
- - Julien Dauphant (@jdauphant)
- - Matt Jeffery (@mattjeffery)
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
description:
- - Load or unload kernel modules.
+ - Load or unload kernel modules.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: str
- required: true
- description:
- - Name of kernel module to manage.
- state:
- type: str
- description:
- - Whether the module should be present or absent.
- choices: [ absent, present ]
- default: present
- params:
- type: str
- description:
- - Modules parameters.
- default: ''
- persistent:
- type: str
- choices: [ disabled, absent, present ]
- default: disabled
- version_added: 7.0.0
- description:
- - Persistency between reboots for configured module.
- - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots.
- - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
- - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
- loaded on next reboot.
- - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
- - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the
- kernel modules themselves instead of configuration like this.
- - In fact, most modern kernel modules are prepared for automatic loading already.
- - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled)."
-'''
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [absent, present]
+ default: present
+ params:
+ type: str
+ description:
+ - Modules parameters.
+ default: ''
+ persistent:
+ type: str
+ choices: [disabled, absent, present]
+ default: disabled
+ version_added: 7.0.0
+ description:
+ - Persistency between reboots for configured module.
+ - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent
+ during reboots.
+ - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module is loaded
+ on next reboot.
+ - If V(absent), comments out module name from C(/etc/modules-load.d/) and comments out params from C(/etc/modprobe.d/)
+ so the module is not loaded on next reboot.
+ - If V(disabled), does not touch anything and leaves C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
+ - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar
+ triggers encoded in the kernel modules themselves instead of configuration like this.
+ - In fact, most modern kernel modules are prepared for automatic loading already.
+ - B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add the 802.1q module
community.general.modprobe:
name: 8021q
@@ -78,7 +79,7 @@ EXAMPLES = '''
state: present
params: 'numdummies=2'
persistent: present
-'''
+"""
import os.path
import platform
diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py
index 5475ab1e52..a10967264c 100644
--- a/plugins/modules/monit.py
+++ b/plugins/modules/monit.py
@@ -9,14 +9,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: monit
-short_description: Manage the state of a program monitored via Monit
+short_description: Manage the state of a program monitored using Monit
description:
- - Manage the state of a program monitored via Monit.
+ - Manage the state of a program monitored using Monit.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
@@ -32,26 +31,25 @@ options:
description:
- The state of service.
required: true
- choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ choices: ["present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded"]
type: str
timeout:
description:
- - If there are pending actions for the service monitored by monit, then Ansible will check
- for up to this many seconds to verify the requested action has been performed.
- Ansible will sleep for five seconds between each check.
+ - If there are pending actions for the service monitored by monit, then it checks for up to this many seconds to verify
+ the requested action has been performed. The module sleeps for five seconds between each check.
default: 300
type: int
author:
- - Darryl Stoflet (@dstoflet)
- - Simon Kelly (@snopoke)
-'''
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Manage the state of program httpd to be in started state
community.general.monit:
name: httpd
state: started
-'''
+"""
import time
import re
@@ -218,7 +216,7 @@ class Monit(object):
return running_status
def wait_for_monit_to_stop_pending(self, current_status=None):
- """Fails this run if there is no status or it's pending/initializing for timeout"""
+ """Fails this run if there is no status or it is pending/initializing for timeout"""
timeout_time = time.time() + self.timeout
if not current_status:
diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py
index f8d64e6a00..b35a257da7 100644
--- a/plugins/modules/mqtt.py
+++ b/plugins/modules/mqtt.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: mqtt
short_description: Publish a message on an MQTT topic for the IoT
description:
@@ -26,12 +25,12 @@ options:
server:
type: str
description:
- - MQTT broker address/name
+ - MQTT broker address/name.
default: localhost
port:
type: int
description:
- - MQTT broker port number
+ - MQTT broker port number.
default: 1883
username:
type: str
@@ -44,76 +43,68 @@ options:
client_id:
type: str
description:
- - MQTT client identifier
- - If not specified, a value C(hostname + pid) will be used.
+ - MQTT client identifier.
+ - If not specified, it uses a value C(hostname + pid).
topic:
type: str
description:
- - MQTT topic name
+ - MQTT topic name.
required: true
payload:
type: str
description:
- - Payload. The special string V("None") may be used to send a NULL
- (that is, empty) payload which is useful to simply notify with the O(topic)
- or to clear previously retained messages.
+ - Payload. The special string V("None") may be used to send a NULL (that is, empty) payload which is useful to simply
+ notify with the O(topic) or to clear previously retained messages.
required: true
qos:
type: str
description:
- - QoS (Quality of Service)
+ - QoS (Quality of Service).
default: "0"
- choices: [ "0", "1", "2" ]
+ choices: ["0", "1", "2"]
retain:
description:
- - Setting this flag causes the broker to retain (i.e. keep) the message so that
- applications that subsequently subscribe to the topic can received the last
- retained message immediately.
+ - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently
+ subscribe to the topic can received the last retained message immediately.
type: bool
default: false
ca_cert:
type: path
description:
- - The path to the Certificate Authority certificate files that are to be
- treated as trusted by this client. If this is the only option given
- then the client will operate in a similar manner to a web browser. That
- is to say it will require the broker to have a certificate signed by the
- Certificate Authorities in ca_certs and will communicate using TLS v1,
- but will not attempt any form of authentication. This provides basic
- network encryption but may not be sufficient depending on how the broker
- is configured.
- aliases: [ ca_certs ]
+ - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this
+ is the only option given then the client operates in a similar manner to a web browser. That is to say it requires
+ the broker to have a certificate signed by the Certificate Authorities in ca_certs and communicates using TLS v1,
+ but does not attempt any form of authentication. This provides basic network encryption but may not be sufficient
+ depending on how the broker is configured.
+ aliases: [ca_certs]
client_cert:
type: path
description:
- - The path pointing to the PEM encoded client certificate. If this is not
- None it will be used as client information for TLS based
+ - The path pointing to the PEM encoded client certificate. If this is set it is used as client information for TLS based
authentication. Support for this feature is broker dependent.
- aliases: [ certfile ]
+ aliases: [certfile]
client_key:
type: path
description:
- - The path pointing to the PEM encoded client private key. If this is not
- None it will be used as client information for TLS based
+ - The path pointing to the PEM encoded client private key. If this is set it is used as client information for TLS based
authentication. Support for this feature is broker dependent.
- aliases: [ keyfile ]
+ aliases: [keyfile]
tls_version:
description:
- Specifies the version of the SSL/TLS protocol to be used.
- - By default (if the python version supports it) the highest TLS version is
- detected. If unavailable, TLS v1 is used.
+ - By default (if the python version supports it) the highest TLS version is detected. If unavailable, TLS v1 is used.
type: str
choices:
- tlsv1.1
- tlsv1.2
-requirements: [ mosquitto ]
+requirements: [mosquitto]
notes:
- - This module requires a connection to an MQTT broker such as Mosquitto
- U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+ - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt)
+ Python client (U(https://pypi.org/project/paho-mqtt/)).
author: "Jan-Piet Mens (@jpmens)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Publish a message on an MQTT topic
community.general.mqtt:
topic: 'service/ansible/{{ ansible_hostname }}'
@@ -122,7 +113,7 @@ EXAMPLES = '''
retain: false
client_id: ans001
delegate_to: localhost
-'''
+"""
# ===========================================
# MQTT module support methods.
@@ -171,15 +162,15 @@ def main():
port=dict(default=1883, type='int'),
topic=dict(required=True),
payload=dict(required=True),
- client_id=dict(default=None),
+ client_id=dict(),
qos=dict(default="0", choices=["0", "1", "2"]),
retain=dict(default=False, type='bool'),
- username=dict(default=None),
- password=dict(default=None, no_log=True),
- ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
- client_cert=dict(default=None, type='path', aliases=['certfile']),
- client_key=dict(default=None, type='path', aliases=['keyfile']),
- tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ username=dict(),
+ password=dict(no_log=True),
+ ca_cert=dict(type='path', aliases=['ca_certs']),
+ client_cert=dict(type='path', aliases=['certfile']),
+ client_key=dict(type='path', aliases=['keyfile']),
+ tls_version=dict(choices=['tlsv1.1', 'tlsv1.2'])
),
supports_check_mode=True
)
diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py
index a85f721fca..8a15bfe699 100644
--- a/plugins/modules/mssql_db.py
+++ b/plugins/modules/mssql_db.py
@@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: mssql_db
short_description: Add or remove MSSQL databases from a remote host
description:
@@ -26,56 +25,55 @@ attributes:
options:
name:
description:
- - name of the database to add or remove
+ - Name of the database to add or remove.
required: true
- aliases: [ db ]
+ aliases: [db]
type: str
login_user:
description:
- - The username used to authenticate with
+ - The username used to authenticate with.
type: str
default: ''
login_password:
description:
- - The password used to authenticate with
+ - The password used to authenticate with.
type: str
default: ''
login_host:
description:
- - Host running the database
+ - Host running the database.
type: str
required: true
login_port:
description:
- - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used
+ - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used.
default: '1433'
type: str
state:
description:
- - The database state
+ - The database state.
default: present
- choices: [ "present", "absent", "import" ]
+ choices: ["present", "absent", "import"]
type: str
target:
description:
- - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
- files (C(.sql)) files are supported.
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) files are
+ supported.
type: str
autocommit:
description:
- - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed
- within a transaction.
+ - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since
+ some content can not be changed within a transaction.
type: bool
default: false
notes:
- - Requires the pymssql Python package on the remote host. For Ubuntu, this
- is as easy as pip install pymssql (See M(ansible.builtin.pip).)
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as pip install pymssql (See M(ansible.builtin.pip)).
requirements:
- - pymssql
+ - pymssql
author: Vedit Firat Arig (@vedit)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new database with name 'jackdata'
community.general.mssql_db:
name: jackdata
@@ -92,11 +90,11 @@ EXAMPLES = '''
name: my_db
state: import
target: /tmp/dump.sql
-'''
+"""
-RETURN = '''
+RETURN = r"""
#
-'''
+"""
import os
import traceback
@@ -160,7 +158,7 @@ def main():
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
- target=dict(default=None),
+ target=dict(),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py
index b1713092c8..37bd0853d0 100644
--- a/plugins/modules/mssql_script.py
+++ b/plugins/modules/mssql_script.py
@@ -7,8 +7,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: mssql_script
short_description: Execute SQL scripts on a MSSQL database
@@ -17,77 +16,75 @@ version_added: "4.0.0"
description:
- Execute SQL scripts on a MSSQL database.
-
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: partial
- details:
- - The script will not be executed in check mode.
- diff_mode:
- support: none
+ check_mode:
+ support: partial
+ details:
+ - The script is not be executed in check mode.
+ diff_mode:
+ support: none
options:
- name:
- description: Database to run script against.
- aliases: [ db ]
- default: ''
- type: str
- login_user:
- description: The username used to authenticate with.
- type: str
- login_password:
- description: The password used to authenticate with.
- type: str
- login_host:
- description: Host running the database.
- type: str
- required: true
- login_port:
- description: Port of the MSSQL server. Requires O(login_host) be defined as well.
- default: 1433
- type: int
- script:
- description:
- - The SQL script to be executed.
- - Script can contain multiple SQL statements. Multiple Batches can be separated by V(GO) command.
- - Each batch must return at least one result set.
- required: true
- type: str
- transaction:
- description:
- - If transactional mode is requested, start a transaction and commit the change only if the script succeed.
- Otherwise, rollback the transaction.
- - If transactional mode is not requested (default), automatically commit the change.
- type: bool
- default: false
- version_added: 8.4.0
- output:
- description:
- - With V(default) each row will be returned as a list of values. See RV(query_results).
- - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict).
- - V(dict) requires named columns to be returned by each query otherwise an error is thrown.
- choices: [ "dict", "default" ]
- default: 'default'
- type: str
- params:
- description: |
- Parameters passed to the script as SQL parameters.
- (Query V('SELECT %(name\)s"') with V(example: '{"name": "John Doe"}).)'
- type: dict
+ name:
+ description: Database to run script against.
+ aliases: [db]
+ default: ''
+ type: str
+ login_user:
+ description: The username used to authenticate with.
+ type: str
+ login_password:
+ description: The password used to authenticate with.
+ type: str
+ login_host:
+ description: Host running the database.
+ type: str
+ required: true
+ login_port:
+ description: Port of the MSSQL server. Requires O(login_host) be defined as well.
+ default: 1433
+ type: int
+ script:
+ description:
+ - The SQL script to be executed.
+ - Script can contain multiple SQL statements. Multiple Batches can be separated by V(GO) command.
+ - Each batch must return at least one result set.
+ required: true
+ type: str
+ transaction:
+ description:
+ - If transactional mode is requested, start a transaction and commit the change only if the script succeed. Otherwise,
+ rollback the transaction.
+ - If transactional mode is not requested (default), automatically commit the change.
+ type: bool
+ default: false
+ version_added: 8.4.0
+ output:
+ description:
+ - With V(default) each row is returned as a list of values. See RV(query_results).
+ - Output format V(dict) returns dictionary with the column names as keys. See RV(query_results_dict).
+ - V(dict) requires named columns to be returned by each query otherwise an error is thrown.
+ choices: ["dict", "default"]
+ default: 'default'
+ type: str
+ params:
+ description: |-
+ Parameters passed to the script as SQL parameters.
+ (Query V('SELECT %(name\)s"') with V(example: '{"name": "John Doe"}).)'.
+ type: dict
notes:
- - Requires the pymssql Python package on the remote host. For Ubuntu, this
- is as easy as C(pip install pymssql) (See M(ansible.builtin.pip).)
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as C(pip install pymssql) (See M(ansible.builtin.pip)).
requirements:
- - pymssql
+ - pymssql
author:
- - Kris Budde (@kbudde)
-'''
+ - Kris Budde (@kbudde)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Check DB connection
community.general.mssql_script:
login_user: "{{ mssql_login_user }}"
@@ -140,11 +137,11 @@ EXAMPLES = r'''
register: result_batches
- assert:
that:
- - result_batches.query_results | length == 2 # two batch results
- - result_batches.query_results[0] | length == 2 # two selects in first batch
- - result_batches.query_results[0][0] | length == 1 # one row in first select
- - result_batches.query_results[0][0][0] | length == 1 # one column in first row
- - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values.
+ - result_batches.query_results | length == 2 # two batch results
+ - result_batches.query_results[0] | length == 2 # two selects in first batch
+ - result_batches.query_results[0][0] | length == 1 # one row in first select
+ - result_batches.query_results[0][0][0] | length == 1 # one column in first row
+ - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values.
- name: two batches with dict output
community.general.mssql_script:
@@ -161,68 +158,110 @@ EXAMPLES = r'''
register: result_batches_dict
- assert:
that:
- - result_batches_dict.query_results_dict | length == 2 # two batch results
- - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch
- - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select
- - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row
-'''
+ - result_batches_dict.query_results_dict | length == 2 # two batch results
+ - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch
+ - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select
+ - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row
+"""
-RETURN = r'''
+RETURN = r"""
query_results:
- description: List of batches (queries separated by V(GO) keyword).
- type: list
- elements: list
- returned: success and O(output=default)
- sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
- contains:
- queries:
- description:
- - List of result sets of each query.
- - If a query returns no results, the results of this and all the following queries will not be included in the output.
- - Use the V(GO) keyword in O(script) to separate queries.
- type: list
- elements: list
- contains:
- rows:
- description: List of rows returned by query.
- type: list
- elements: list
- contains:
- column_value:
- description:
- - List of column values.
- - Any non-standard JSON type is converted to string.
- type: list
- example: ["Batch 0 - Select 0"]
- returned: success, if output is default
+ description: List of batches (queries separated by V(GO) keyword).
+ type: list
+ elements: list
+ returned: success and O(output=default)
+ sample:
+ [
+ [
+ [
+ [
+ "Batch 0 - Select 0"
+ ]
+ ],
+ [
+ [
+ "Batch 0 - Select 1"
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ "Batch 1 - Select 0"
+ ]
+ ]
+ ]
+ ]
+ contains:
+ queries:
+ description:
+ - List of result sets of each query.
+ - If a query returns no results, the results of this and all the following queries are not included in the output.
+ - Use the V(GO) keyword in O(script) to separate queries.
+ type: list
+ elements: list
+ contains:
+ rows:
+ description: List of rows returned by query.
+ type: list
+ elements: list
+ contains:
+ column_value:
+ description:
+ - List of column values.
+ - Any non-standard JSON type is converted to string.
+ type: list
+ example: ["Batch 0 - Select 0"]
+ returned: success, if output is default
query_results_dict:
- description: List of batches (queries separated by V(GO) keyword).
- type: list
- elements: list
- returned: success and O(output=dict)
- sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
- contains:
- queries:
- description:
- - List of result sets of each query.
- - If a query returns no results, the results of this and all the following queries will not be included in the output.
- Use 'GO' keyword to separate queries.
- type: list
- elements: list
- contains:
- rows:
- description: List of rows returned by query.
- type: list
- elements: list
- contains:
- column_dict:
- description:
- - Dictionary of column names and values.
- - Any non-standard JSON type is converted to string.
- type: dict
- example: {"col_name": "Batch 0 - Select 0"}
- returned: success, if output is dict
-'''
+ description: List of batches (queries separated by V(GO) keyword).
+ type: list
+ elements: list
+ returned: success and O(output=dict)
+ sample:
+ [
+ [
+ [
+ [
+ "Batch 0 - Select 0"
+ ]
+ ],
+ [
+ [
+ "Batch 0 - Select 1"
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ "Batch 1 - Select 0"
+ ]
+ ]
+ ]
+ ]
+ contains:
+ queries:
+ description:
+ - List of result sets of each query.
+ - If a query returns no results, the results of this and all the following queries are not included in the output.
+ Use V(GO) keyword to separate queries.
+ type: list
+ elements: list
+ contains:
+ rows:
+ description: List of rows returned by query.
+ type: list
+ elements: list
+ contains:
+ column_dict:
+ description:
+ - Dictionary of column names and values.
+ - Any non-standard JSON type is converted to string.
+ type: dict
+ example: {"col_name": "Batch 0 - Select 0"}
+ returned: success, if output is dict
+"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
import traceback
@@ -243,7 +282,7 @@ def clean_output(o):
def run_module():
module_args = dict(
- name=dict(required=False, aliases=['db'], default=''),
+ name=dict(aliases=['db'], default=''),
login_user=dict(),
login_password=dict(no_log=True),
login_host=dict(required=True),
diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py
index 0f1f0b7c50..830a805f87 100644
--- a/plugins/modules/nagios.py
+++ b/plugins/modules/nagios.py
@@ -14,20 +14,14 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications
description:
- - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - 'The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts.'
- The C(nagios) module is not idempotent.
- - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
- to the host the playbook is currently running on.
- - You can specify multiple services at once by separating them with commas, .e.g. O(services=httpd,nfs,puppet).
- - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge for the I(host itself),
- for example O(services=host). This keyword may not be given with other services at the same time.
- B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
- To schedule downtime for all services on particular host use keyword "all", for example O(services=all).
+ - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}})
+ variable to refer to the host the playbook is currently running on.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -41,9 +35,20 @@ options:
- Action to take.
- The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0.
required: true
- choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
- "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
- "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ choices:
+ - downtime
+ - delete_downtime
+ - enable_alerts
+ - disable_alerts
+ - silence
+ - unsilence
+ - silence_nagios
+ - unsilence_nagios
+ - command
+ - servicegroup_service_downtime
+ - servicegroup_host_downtime
+ - acknowledge
+ - forced_check
type: str
host:
description:
@@ -51,18 +56,16 @@ options:
type: str
cmdfile:
description:
- - Path to the nagios I(command file) (FIFO pipe).
- Only required if auto-detection fails.
+ - Path to the nagios I(command file) (FIFO pipe). Only required if auto-detection fails.
type: str
author:
description:
- - Author to leave downtime comments as.
- Only used when O(action) is V(downtime) or V(acknowledge).
+ - Author to leave downtime comments as. Only used when O(action) is V(downtime) or V(acknowledge).
type: str
default: Ansible
comment:
description:
- - Comment when O(action) is V(downtime) or V(acknowledge).
+ - Comment when O(action) is V(downtime) or V(acknowledge).
type: str
default: Scheduling downtime
start:
@@ -79,8 +82,14 @@ options:
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
- - "B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts)."
- aliases: [ "service" ]
+ - 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).'
+ - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet).
+ - When specifying what O(services) to handle there is a special service value, V(host), which handles alerts/downtime/acknowledge
+ for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same
+ time. B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the
+ services running on it.) To schedule downtime for all O(services) on particular host use keyword V(all), for example
+ O(services=all).
+ aliases: ["service"]
type: str
servicegroup:
description:
@@ -89,14 +98,14 @@ options:
type: str
command:
description:
- - The raw command to send to nagios, which should not include the submitted time header or the line-feed.
- - B(Required) option when using the V(command) O(action).
+ - The raw command to send to Nagios, which should not include the submitted time header or the line-feed.
+ - B(Required) option when O(action=command).
type: str
author: "Tim Bielawa (@tbielawa)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Set 30 minutes of apache downtime
community.general.nagios:
action: downtime
@@ -245,7 +254,7 @@ EXAMPLES = '''
community.general.nagios:
action: command
command: DISABLE_FAILURE_PREDICTION
-'''
+"""
import time
import os.path
diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py
index cba70c0fa3..c48e0a2fb2 100644
--- a/plugins/modules/netcup_dns.py
+++ b/plugins/modules/netcup_dns.py
@@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: netcup_dns
notes: []
short_description: Manage Netcup DNS records
description:
- - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)."
+ - Manages DNS records using the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,17 +25,17 @@ attributes:
options:
api_key:
description:
- - "API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))."
+ - API key for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)).
required: true
type: str
api_password:
description:
- - "API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))."
+ - API password for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)).
required: true
type: str
customer_id:
description:
- - Netcup customer id.
+ - Netcup customer ID.
required: true
type: int
domain:
@@ -48,7 +47,7 @@ options:
description:
- Record to add or delete, supports wildcard (V(*)). Default is V(@) (that is, the zone name).
default: "@"
- aliases: [ name ]
+ aliases: [name]
type: str
type:
description:
@@ -69,7 +68,7 @@ options:
default: false
description:
- Whether the record should be the only one for that record type and record name. Only use with O(state=present).
- - This will delete all other records with the same record name and type.
+ - This deletes all other records with the same record name and type.
priority:
description:
- Record priority. Required for O(type=MX).
@@ -80,7 +79,7 @@ options:
- Whether the record should exist or not.
required: false
default: present
- choices: [ 'present', 'absent' ]
+ choices: ['present', 'absent']
type: str
timeout:
description:
@@ -91,10 +90,9 @@ options:
requirements:
- "nc-dnsapi >= 0.1.3"
author: "Nicolai Buchwitz (@nbuchwitz)"
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a record of type A
community.general.netcup_dns:
api_key: "..."
@@ -156,41 +154,41 @@ EXAMPLES = '''
type: "A"
value: "127.0.0.1"
timeout: 30
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
records:
- description: list containing all records
- returned: success
- type: complex
- contains:
- name:
- description: the record name
- returned: success
- type: str
- sample: fancy-hostname
- type:
- description: the record type
- returned: success
- type: str
- sample: A
- value:
- description: the record destination
- returned: success
- type: str
- sample: 127.0.0.1
- priority:
- description: the record priority (only relevant if type=MX)
- returned: success
- type: int
- sample: 0
- id:
- description: internal id of the record
- returned: success
- type: int
- sample: 12345
-'''
+ description: List containing all records.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: The record name.
+ returned: success
+ type: str
+ sample: fancy-hostname
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ value:
+ description: The record destination.
+ returned: success
+ type: str
+ sample: 127.0.0.1
+ priority:
+ description: The record priority (only relevant if RV(records[].type=MX)).
+ returned: success
+ type: int
+ sample: 0
+ id:
+ description: Internal ID of the record.
+ returned: success
+ type: int
+ sample: 12345
+"""
import traceback
@@ -215,15 +213,15 @@ def main():
customer_id=dict(required=True, type='int'),
domain=dict(required=True),
- record=dict(required=False, default='@', aliases=['name']),
+ record=dict(default='@', aliases=['name']),
type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT',
'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA',
'SSHFP']),
value=dict(required=True),
- priority=dict(required=False, type='int'),
- solo=dict(required=False, type='bool', default=False),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- timeout=dict(required=False, type='int', default=5),
+ priority=dict(type='int'),
+ solo=dict(type='bool', default=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ timeout=dict(type='int', default=5),
),
supports_check_mode=True
diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py
index e5a1160822..af58402a44 100644
--- a/plugins/modules/newrelic_deployment.py
+++ b/plugins/modules/newrelic_deployment.py
@@ -9,13 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: newrelic_deployment
author: "Matt Coddington (@mcodd)"
short_description: Notify New Relic about app deployments
description:
- - Notify New Relic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)
+ - Notify New Relic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -44,49 +43,49 @@ options:
changelog:
type: str
description:
- - A list of changes for this deployment
+ - A list of changes for this deployment.
required: false
description:
type: str
description:
- - Text annotation for the deployment - notes for you
+ - Text annotation for the deployment - notes for you.
required: false
revision:
type: str
description:
- - A revision number (e.g., git commit SHA)
+ - A revision number (for example, git commit SHA).
required: true
user:
type: str
description:
- - The name of the user/process that triggered this deployment
+ - The name of the user/process that triggered this deployment.
required: false
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
required: false
default: true
type: bool
app_name_exact_match:
type: bool
description:
- - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match.
- If set to V(false) it returns the first result.
+ - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. If set to
+ V(false) it returns the first result.
required: false
default: false
version_added: 7.5.0
requirements: []
-'''
+"""
-EXAMPLES = '''
-- name: Notify New Relic about an app deployment
+EXAMPLES = r"""
+- name: Notify New Relic about an app deployment
community.general.newrelic_deployment:
token: AAAAAA
app_name: myapp
user: ansible deployment
revision: '1.0'
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -103,14 +102,14 @@ def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
- app_name=dict(required=False),
- application_id=dict(required=False),
- changelog=dict(required=False),
- description=dict(required=False),
+ app_name=dict(),
+ application_id=dict(),
+ changelog=dict(),
+ description=dict(),
revision=dict(required=True),
- user=dict(required=False),
+ user=dict(),
validate_certs=dict(default=True, type='bool'),
- app_name_exact_match=dict(required=False, type='bool', default=False),
+ app_name_exact_match=dict(type='bool', default=False),
),
required_one_of=[['app_name', 'application_id']],
required_if=[('app_name_exact_match', True, ['app_name'])],
diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py
index 39f127f98c..2d3a62b053 100644
--- a/plugins/modules/nexmo.py
+++ b/plugins/modules/nexmo.py
@@ -9,11 +9,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: nexmo
-short_description: Send a SMS via nexmo
+short_description: Send a SMS using nexmo
description:
- - Send a SMS message via nexmo
+ - Send a SMS message using nexmo.
author: "Matt Martz (@sivel)"
attributes:
check_mode:
@@ -24,42 +24,41 @@ options:
api_key:
type: str
description:
- - Nexmo API Key
+ - Nexmo API Key.
required: true
api_secret:
type: str
description:
- - Nexmo API Secret
+ - Nexmo API Secret.
required: true
src:
type: int
description:
- - Nexmo Number to send from
+ - Nexmo Number to send from.
required: true
dest:
type: list
elements: int
description:
- - Phone number(s) to send SMS message to
+ - Phone number(s) to send SMS message to.
required: true
msg:
type: str
description:
- - Message to text to send. Messages longer than 160 characters will be
- split into multiple messages
+ - Message text to send. Messages longer than 160 characters are split into multiple messages.
required: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
extends_documentation_fragment:
- ansible.builtin.url
- community.general.attributes
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Send notification message via Nexmo
community.general.nexmo:
api_key: 640c8a53
diff --git a/plugins/modules/nginx_status_info.py b/plugins/modules/nginx_status_info.py
index 6bbea078b0..7f9865878c 100644
--- a/plugins/modules/nginx_status_info.py
+++ b/plugins/modules/nginx_status_info.py
@@ -9,8 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: nginx_status_info
short_description: Retrieve information on nginx status
description:
@@ -34,9 +33,9 @@ options:
notes:
- See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Gather status info from nginx on localhost
- name: Get current http stats
community.general.nginx_status_info:
@@ -49,10 +48,9 @@ EXAMPLES = r'''
url: http://localhost/nginx_status
timeout: 20
register: result
-'''
+"""
-RETURN = r'''
----
+RETURN = r"""
active_connections:
description: Active connections.
returned: success
@@ -64,7 +62,8 @@ accepts:
type: int
sample: 81769947
handled:
- description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some
+ resource limits have been reached.
returned: success
type: int
sample: 81769947
@@ -93,7 +92,7 @@ data:
returned: success
type: str
sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
-'''
+"""
import re
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py
index 5b81861e8f..07b17bcf9e 100644
--- a/plugins/modules/nictagadm.py
+++ b/plugins/modules/nictagadm.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: nictagadm
short_description: Manage nic tags on SmartOS systems
description:
@@ -26,39 +25,39 @@ attributes:
options:
name:
description:
- - Name of the nic tag.
+ - Name of the nic tag.
required: true
type: str
mac:
description:
- - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub).
- - Parameters O(mac) and O(etherstub) are mutually exclusive.
+ - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub).
+ - Parameters O(mac) and O(etherstub) are mutually exclusive.
type: str
etherstub:
description:
- - Specifies that the nic tag will be attached to a created O(etherstub).
- - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac).
+ - Specifies that the nic tag is attached to a created O(etherstub).
+ - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac).
type: bool
default: false
mtu:
description:
- - Specifies the size of the O(mtu) of the desired nic tag.
- - Parameters O(mtu) and O(etherstub) are mutually exclusive.
+ - Specifies the size of the O(mtu) of the desired nic tag.
+ - Parameters O(mtu) and O(etherstub) are mutually exclusive.
type: int
force:
description:
- - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
 + - When O(state=absent) this switch uses the C(-f) parameter and deletes the nic tag regardless of existing VMs.
type: bool
default: false
state:
description:
- - Create or delete a SmartOS nic tag.
+ - Create or delete a SmartOS nic tag.
type: str
- choices: [ absent, present ]
+ choices: [absent, present]
default: present
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
community.general.nictagadm:
name: storage0
@@ -70,11 +69,11 @@ EXAMPLES = r'''
community.general.nictagadm:
name: storage0
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
name:
- description: nic tag name
+ description: Nic tag name.
returned: always
type: str
sample: storage0
@@ -84,26 +83,26 @@ mac:
type: str
sample: 00:1b:21:a3:f5:4d
etherstub:
- description: specifies if the nic tag will create and attach to an etherstub.
+ description: Specifies if the nic tag was created and attached to an etherstub.
returned: always
type: bool
sample: false
mtu:
- description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive.
+ description: Specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive.
returned: always
type: int
sample: 1500
force:
- description: Shows if -f was used during the deletion of a nic tag
+ description: Shows if C(-f) was used during the deletion of a nic tag.
returned: always
type: bool
sample: false
state:
- description: state of the target
+ description: State of the target.
returned: always
type: str
sample: present
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.network import is_mac
diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py
index d80343ac34..0d35e5aacc 100644
--- a/plugins/modules/nmcli.py
+++ b/plugins/modules/nmcli.py
@@ -9,1058 +9,1152 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: nmcli
author:
- - Chris Long (@alcamie101)
+ - Chris Long (@alcamie101)
short_description: Manage Networking
requirements:
- - nmcli
+ - nmcli
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
description:
- - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.'
- - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
- - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
- - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
- - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+ - Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team),
+ V(bond), V(vlan) and so on.
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: C(NetworkManager).'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: C(NetworkManager-tui).'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: C(network-manager).'
+ - 'On openSUSE, the requirements can be met by installing the following packages: C(NetworkManager).'
attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
options:
- state:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ - Using O(state=present) creates connection set to be brought up automatically.
+ - Using O(state=up) and O(state=down) does not modify connection with other parameters. These states have been added
+ in community.general 9.5.0.
+ type: str
+ required: true
+ choices: [absent, present, up, down]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated.
+ type: bool
+ default: true
+ autoconnect_priority:
+ description:
+ - The priority of the connection profile for autoconnect. If set, connection profiles with higher priority are preferred.
+ type: int
+ version_added: 11.0.0
+ autoconnect_retries:
+ description:
+ - The number of times to retry autoconnecting.
+ type: int
+ version_added: 11.0.0
+ conn_name:
+ description:
 + - The name used to call the connection. Pattern is V(<type>[-<ifname>][-<num>]).
+ type: str
+ required: true
+ conn_reload:
+ description:
+ - Whether the connection should be reloaded if it was modified.
+ type: bool
+ required: false
+ default: false
+ version_added: 9.5.0
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection is only applicable to this interface name.
+ - A special value of V(*) can be used for interface-independent connections.
+ - The O(ifname) argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
+ - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type V(dummy) is added in community.general 3.5.0.
+ - Type V(gsm) is added in community.general 3.7.0.
+ - Type V(infiniband) is added in community.general 2.0.0.
+ - Type V(loopback) is added in community.general 8.1.0.
+ - Type V(macvlan) is added in community.general 6.6.0.
+ - Type V(ovs-bridge) is added in community.general 8.6.0.
+ - Type V(ovs-interface) is added in community.general 8.6.0.
+ - Type V(ovs-port) is added in community.general 8.6.0.
+ - Type V(wireguard) is added in community.general 4.3.0.
+ - Type V(vpn) is added in community.general 5.1.0.
+ - Type V(vrf) is added in community.general 10.4.0.
+ - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type)
+ option.
+ - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type)
+ option.
+ type: str
+ choices:
+ - bond
+ - bond-slave
+ - bridge
+ - bridge-slave
+ - dummy
+ - ethernet
+ - generic
+ - gre
+ - infiniband
+ - ipip
+ - macvlan
+ - sit
+ - team
+ - team-slave
+ - vlan
+ - vxlan
+ - wifi
+ - gsm
+ - wireguard
+ - ovs-bridge
+ - ovs-port
+ - ovs-interface
+ - vpn
+ - vrf
+ - loopback
+ mode:
+ description:
+ - This is the type of device or network connection that you wish to create for a bond or bridge.
+ type: str
+ choices: [802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast]
+ default: balance-rr
+ transport_mode:
+ description:
+ - This option sets the connection type of Infiniband IPoIB devices.
+ type: str
+ choices: [datagram, connected]
+ version_added: 5.8.0
+ infiniband_mac:
+ description:
+ - MAC address of the Infiniband IPoIB devices.
+ type: str
+ version_added: 10.6.0
+ slave_type:
+ description:
+ - Type of the device of this slave's master connection (for example V(bond)).
+ - Type V(ovs-port) is added in community.general 8.6.0.
+ type: str
+ choices: ['bond', 'bridge', 'team', 'ovs-port', 'vrf']
+ version_added: 7.0.0
+ master:
+ description:
+ - Master [-][-].
+ - The Type Of Service.
+ type: int
+ route_metric4:
+ description:
+ - Set metric level of ipv4 routes configured on interface.
+ type: int
+ version_added: 2.0.0
+ routing_rules4:
+ description:
+ - Is the same as in an C(ip rule add) command, except always requires specifying a priority.
+ type: list
+ elements: str
+ version_added: 3.3.0
+ never_default4:
+ description:
+ - Set as default route.
+ - This parameter is mutually_exclusive with gw4 parameter.
+ type: bool
+ default: false
+ version_added: 2.0.0
+ dns4:
+ description:
+ - A list of up to 3 DNS servers.
+ - The entries must be IPv4 addresses, for example V(192.0.2.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ dns4_options:
+ description:
+ - A list of DNS options.
+ elements: str
+ type: list
+ version_added: 7.2.0
+ dns4_ignore_auto:
+ description:
+ - Ignore automatically configured IPv4 name servers.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ method4:
+ description:
+ - Configuration method to be used for IPv4.
+ - If O(ip4) is set, C(ipv4.method) is automatically set to V(manual) and this parameter is not needed.
+ type: str
+ choices: [auto, link-local, manual, shared, disabled]
+ version_added: 2.2.0
+ may_fail4:
+ description:
+ - If you need O(ip4) configured before C(network-online.target) is reached, set this option to V(false).
+ - This option applies when O(method4) is not V(disabled).
+ type: bool
+ default: true
+ version_added: 3.3.0
+ ip6:
+ description:
+ - List of IPv6 addresses to this interface.
+ - Use the format V(abbe::cafe/128) or V(abbe::cafe).
+ - If defined and O(method6) is not specified, automatically set C(ipv6.method) to V(manual).
+ type: list
+ elements: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format V(2001:db8::1).
+ type: str
+ gw6_ignore_auto:
+ description:
+ - Ignore automatically configured IPv6 routes.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ routes6:
+ description:
+ - The list of IPv6 routes.
+ - Use the format V(fd12:3456:789a:1::/64 2001:dead:beef::1).
+ - To specify more complex routes, use the O(routes6_extended) option.
+ type: list
+ elements: str
+ version_added: 4.4.0
+ routes6_extended:
+ description:
+ - The list of IPv6 routes but with parameters.
+ type: list
+ elements: dict
+ suboptions:
+ ip:
+ description:
+ - IP or prefix of route.
+ - Use the format V(fd12:3456:789a:1::/64).
type: str
required: true
- conn_reload:
+ next_hop:
description:
- - Whether the connection should be reloaded if it was modified.
+ - Use the format V(2001:dead:beef::1).
+ type: str
+ metric:
+ description:
+ - Route metric.
+ type: int
+ table:
+ description:
+ - The table to add this route to.
+ - The default depends on C(ipv6.route-table).
+ type: int
+ cwnd:
+ description:
+ - The clamp for congestion window.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller.
+ type: int
+ onlink:
+ description:
+ - Pretend that the nexthop is directly attached to this link, even if it does not match any interface prefix.
type: bool
- required: false
- default: false
- version_added: 9.5.0
- ifname:
+ route_metric6:
+ description:
+ - Set metric level of IPv6 routes configured on interface.
+ type: int
+ version_added: 4.4.0
+ dns6:
+ description:
+ - A list of up to 3 DNS servers.
+ - The entries must be IPv6 addresses, for example V(2001:4860:4860::8888).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ dns6_options:
+ description:
+ - A list of DNS options.
+ elements: str
+ type: list
+ version_added: 7.2.0
+ dns6_ignore_auto:
+ description:
+ - Ignore automatically configured IPv6 name servers.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ method6:
+ description:
+ - Configuration method to be used for IPv6.
+ - If O(ip6) is set, C(ipv6.method) is automatically set to V(manual) and this parameter is not needed.
+ - V(disabled) was added in community.general 3.3.0.
+ type: str
+ choices: [ignore, auto, dhcp, link-local, manual, shared, disabled]
+ version_added: 2.2.0
+ ip_privacy6:
+ description:
+ - If enabled, it makes the kernel generate a temporary IPv6 address in addition to the public one.
+ type: str
+ choices: [disabled, prefer-public-addr, prefer-temp-addr, unknown]
+ version_added: 4.2.0
+ addr_gen_mode6:
+ description:
+ - Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration.
+ - V(default) and V(default-or-eui64) have been added in community.general 6.5.0.
+ type: str
+ choices: [default, default-or-eui64, eui64, stable-privacy]
+ version_added: 4.2.0
+ mtu:
+ description:
+ - The connection MTU, for example V(9000). This can not be applied when creating the interface and is done once the
+ interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband).
+ - This parameter defaults to V(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
 + - This is only used with bond and is the primary interface name (for "active-backup" mode), this is usually the
 + 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to V(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ xmit_hash_policy:
+ description:
+ - This is only used with bond - xmit_hash_policy type.
+ type: str
+ version_added: 5.6.0
+ fail_over_mac:
+ description:
+ - This is only used with bond - fail_over_mac.
+ type: str
+ choices: [none, active, follow]
+ version_added: 10.3.0
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: true
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - MAC address of the connection.
+ - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations using this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through
+ the slave the frame was received on.
 + - The default changed to V(false) in community.general 7.0.0. It used to be V(true) before.
+ type: bool
+ default: false
+ runner:
+ description:
+ - This is the type of device or network connection that you wish to create for a team.
+ type: str
+ choices: [broadcast, roundrobin, activebackup, loadbalance, lacp]
+ default: roundrobin
+ version_added: 3.4.0
+ runner_hwaddr_policy:
+ description:
+ - This defines the policy of how hardware addresses of team device and port devices should be set during the team lifetime.
+ type: str
+ choices: [same_all, by_active, only_active]
+ version_added: 3.4.0
+ runner_fast_rate:
+ description:
+ - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets
+ are sent once per second. Otherwise they are sent every 30 seconds.
+ - Only allowed for O(runner=lacp).
+ type: bool
+ version_added: 6.5.0
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address.
+ type: str
+ ip_tunnel_input_key:
+ description:
+ - The key used for tunnel input packets.
+ - Only used when O(type=gre).
+ type: str
+ version_added: 3.6.0
+ ip_tunnel_output_key:
+ description:
+ - The key used for tunnel output packets.
+ - Only used when O(type=gre).
+ type: str
+ version_added: 3.6.0
+ table:
+ description:
+ - This is only used with VRF - VRF table number.
+ type: int
+ version_added: 10.4.0
+ zone:
+ description:
+ - The trust level of the connection.
+ - When updating this property on a currently activated connection, the change takes effect immediately.
+ type: str
+ version_added: 2.0.0
+ wifi_sec:
+ description:
+ - The security configuration of the WiFi connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on
+ the host.
+ - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).'
+ - 'For instance to use common WPA-PSK auth with a password: V({key-mgmt: wpa-psk, psk: my_password}).'
+ type: dict
+ suboptions:
+ auth-alg:
description:
- - The interface to bind the connection to.
- - The connection will only be applicable to this interface name.
- - A special value of V('*') can be used for interface-independent connections.
- - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
- - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it.
+ - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication
+ algorithm required by the AP here.
+ - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP.
+ - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username)
+ and O(wifi_sec.leap-password) properties must be specified.
type: str
- type:
+ choices: [open, shared, leap]
+ fils:
description:
- - This is the type of device or network connection that you wish to create or modify.
- - Type V(dummy) is added in community.general 3.5.0.
- - Type V(gsm) is added in community.general 3.7.0.
- - Type V(infiniband) is added in community.general 2.0.0.
- - Type V(loopback) is added in community.general 8.1.0.
- - Type V(macvlan) is added in community.general 6.6.0.
- - Type V(ovs-bridge) is added in community.general 8.6.0.
- - Type V(ovs-interface) is added in community.general 8.6.0.
- - Type V(ovs-port) is added in community.general 8.6.0.
- - Type V(wireguard) is added in community.general 4.3.0.
- - Type V(vpn) is added in community.general 5.1.0.
- - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option.
- - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) option.
- type: str
- choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan,
- wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, loopback ]
- mode:
+ - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
+ - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access
+ point support it) or V(3) (enable FILS and fail if not supported).
+ - When set to V(0) and no global default is set, FILS is optionally enabled.
+ type: int
+ choices: [0, 1, 2, 3]
+ default: 0
+ group:
description:
- - This is the type of device or network connection that you wish to create for a bond or bridge.
- type: str
- choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
- default: balance-rr
- transport_mode:
- description:
- - This option sets the connection type of Infiniband IPoIB devices.
- type: str
- choices: [ datagram, connected ]
- version_added: 5.8.0
- slave_type:
- description:
- - Type of the device of this slave's master connection (for example V(bond)).
- - Type V(ovs-port) is added in community.general 8.6.0.
- type: str
- choices: [ 'bond', 'bridge', 'team', 'ovs-port' ]
- version_added: 7.0.0
- master:
- description:
- - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond, ovs-port master connection profile.
- - Mandatory if O(slave_type) is defined.
- type: str
- forwarddelay:
- description:
- - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple
+ fragments.
+ - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the
+ current routes at the time of activation.
type: int
- default: 15
- hellotime:
+ peer-routes:
description:
- - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
- type: int
- default: 2
- maxage:
- description:
- - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
- type: int
- default: 20
- ageingtime:
- description:
- - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
- type: int
- default: 300
- mac:
- description:
- - MAC address of the connection.
- - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
- type: str
- slavepriority:
- description:
- - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
- type: int
- default: 32
- path_cost:
- description:
- - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
- type: int
- default: 100
- hairpin:
- description:
- - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
- frame was received on.
- - The default change to V(false) in community.general 7.0.0. It used to be V(true) before.
+ - Whether to automatically add routes for the AllowedIPs ranges of the peers.
+ - If V(true) (the default), NetworkManager automatically adds routes in the routing tables according to C(ipv4.route-table)
+ and C(ipv6.route-table). Usually you want this automatism enabled.
+ - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes
+ in C(ipv4.routes) and C(ipv6.routes), respectively.
+ - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
+ setting is enabled, the peer route for this peer is not added automatically.
type: bool
- default: false
- runner:
- description:
- - This is the type of device or network connection that you wish to create for a team.
+ private-key:
+ description: The 256 bit private-key in base64 encoding.
type: str
- choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ]
- default: roundrobin
- version_added: 3.4.0
- runner_hwaddr_policy:
- description:
- - This defines the policy of how hardware addresses of team device and port devices
- should be set during the team lifetime.
+ private-key-flags:
+ description: C(NMSettingSecretFlags) indicating how to handle the O(wireguard.private-key) property.
+ type: int
+ choices: [0, 1, 2]
+ vpn:
+ description:
+ - Configuration of a VPN connection (PPTP and L2TP).
+ - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome) if host
+ has UI - are installed on the host.
+ type: dict
+ version_added: 5.1.0
+ suboptions:
+ permissions:
+ description: User that has permission to use the connection.
type: str
- choices: [ same_all, by_active, only_active ]
- version_added: 3.4.0
- runner_fast_rate:
+ required: true
+ service-type:
+ description: This defines the service type of connection.
+ type: str
+ required: true
+ gateway:
+ description: The gateway of the connection. It can be an IP address (for example V(192.0.2.1)) or a FQDN address (for
+ example V(vpn.example.com)).
+ type: str
+ required: true
+ password-flags:
description:
- - Option specifies the rate at which our link partner is asked to transmit LACPDU
- packets. If this is V(true) then packets will be sent once per second. Otherwise they
- will be sent every 30 seconds.
- - Only allowed for O(runner=lacp).
+ - NMSettingSecretFlags indicating how to handle the C(vpn.password) property.
+ - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret
+ (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when
+ it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should
+ be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically
+ determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates
+ that the specific secret is not required.'
+ type: int
+ choices: [0, 1, 2, 4]
+ default: 0
+ user:
+ description: Username provided by VPN administrator.
+ type: str
+ required: true
+ ipsec-enabled:
+ description:
+ - Enable or disable IPSec tunnel to L2TP host.
+ - This option is needed when O(vpn.service-type) is V(org.freedesktop.NetworkManager.l2tp).
type: bool
- version_added: 6.5.0
- vlanid:
+ ipsec-psk:
description:
- - This is only used with VLAN - VLAN ID in range <0-4095>.
+ - The pre-shared key in base64 encoding.
+ - >
+ You can encode using this Ansible Jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
+ - This is only used when O(vpn.ipsec-enabled=true).
+ type: str
+ sriov:
+ description:
+ - Allow to configure SR-IOV settings.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.pages.freedesktop.org/NetworkManager/NetworkManager/settings-sriov.html).'
+ type: dict
+ version_added: 10.1.0
+ suboptions:
+ autoprobe-drivers:
+ description:
+ - Whether to autoprobe virtual functions by a compatible driver.
type: int
- vlandev:
+ eswitch-encap-mode:
description:
- - This is only used with VLAN - parent device this VLAN is on, can use ifname.
- type: str
- flags:
- description:
- - This is only used with VLAN - flags.
- type: str
- ingress:
- description:
- - This is only used with VLAN - VLAN ingress priority mapping.
- type: str
- egress:
- description:
- - This is only used with VLAN - VLAN egress priority mapping.
- type: str
- vxlan_id:
- description:
- - This is only used with VXLAN - VXLAN ID.
+ - Select the eswitch encapsulation support.
type: int
- vxlan_remote:
- description:
- - This is only used with VXLAN - VXLAN destination IP address.
- type: str
- vxlan_local:
- description:
- - This is only used with VXLAN - VXLAN local IP address.
- type: str
- ip_tunnel_dev:
+ eswitch-inline-mode:
description:
- - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname.
+ - Select the eswitch inline-mode of the device.
+ type: int
+ eswitch-mode:
+ description:
+ - Select the eswitch mode of the device.
+ type: int
+ total-vfs:
+ description: Number of virtual functions to create. Consult your NIC documentation for the maximum number of VFs supported.
+ type: int
+ vfs:
+ description:
+ - 'Virtual function descriptors in the form: V(INDEX [ATTR=VALUE[ ATTR=VALUE]...]).'
+ - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3
+ vlans=100).
type: str
- ip_tunnel_remote:
- description:
- - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address.
- type: str
- ip_tunnel_local:
- description:
- - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address.
- type: str
- ip_tunnel_input_key:
- description:
- - The key used for tunnel input packets.
- - Only used when O(type=gre).
- type: str
- version_added: 3.6.0
- ip_tunnel_output_key:
- description:
- - The key used for tunnel output packets.
- - Only used when O(type=gre).
- type: str
- version_added: 3.6.0
- zone:
- description:
- - The trust level of the connection.
- - When updating this property on a currently activated connection, the change takes effect immediately.
- type: str
- version_added: 2.0.0
- wifi_sec:
- description:
- - The security configuration of the WiFi connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- - 'An up-to-date list of supported attributes can be found here:
- U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).'
- - 'For instance to use common WPA-PSK auth with a password:
- V({key-mgmt: wpa-psk, psk: my_password}).'
- type: dict
- suboptions:
- auth-alg:
- description:
- - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11
- authentication algorithm required by the AP here.
- - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP.
- - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap))
- the O(wifi_sec.leap-username) and O(wifi_sec.leap-password) properties
- must be specified.
- type: str
- choices: [ open, shared, leap ]
- fils:
- description:
- - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
- - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access point support it) or V(3)
- (enable FILS and fail if not supported).
- - When set to V(0) and no global default is set, FILS will be optionally enabled.
- type: int
- choices: [ 0, 1, 2, 3 ]
- default: 0
- group:
- description:
- - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in
- the list.
- - For maximum compatibility leave this property empty.
- type: list
- elements: str
- choices: [ wep40, wep104, tkip, ccmp ]
- key-mgmt:
- description:
- - Key management used for the connection.
- - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), V(wpa-psk) (WPA2
- + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) (WPA3 enterprise only).
- - This property must be set for any Wi-Fi connection that uses security.
- type: str
- choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ]
- leap-password-flags:
- description: Flags indicating how to handle the O(wifi_sec.leap-password) property.
- type: list
- elements: int
- leap-password:
- description: The login password for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)).
- type: str
- leap-username:
- description: The login username for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)).
- type: str
- pairwise:
- description:
- - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the
- list.
- - For maximum compatibility leave this property empty.
- type: list
- elements: str
- choices: [ tkip, ccmp ]
- pmf:
- description:
- - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
- - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the
- supplicant and the access point support it) or V(3) (enable PMF and fail if not supported).
- - When set to V(0) and no global default is set, PMF will be optionally enabled.
- type: int
- choices: [ 0, 1, 2, 3 ]
- default: 0
- proto:
- description:
- - List of strings specifying the allowed WPA protocol versions to use.
- - Each element may be V(wpa) (allow WPA) or V(rsn) (allow WPA2/RSN).
- - If not specified, both WPA and RSN connections are allowed.
- type: list
- elements: str
- choices: [ wpa, rsn ]
- psk-flags:
- description: Flags indicating how to handle the O(wifi_sec.psk) property.
- type: list
- elements: int
- psk:
- description:
- - Pre-Shared-Key for WPA networks.
- - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is
- (as specified in the 802.11i standard) hashed to derive the
- actual key, or the key in form of 64 hexadecimal character.
- - The WPA3-Personal networks use a passphrase of any length for SAE authentication.
- type: str
- wep-key-flags:
- description:
- - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1),
- O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) properties.
- type: list
- elements: int
- wep-key-type:
- description:
- - Controls the interpretation of WEP keys.
- - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
- password; or V(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
- actual WEP key.
- type: int
- choices: [ 1, 2 ]
- wep-key0:
- description:
- - Index 0 WEP key. This is the WEP key used in most networks.
- - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
- type: str
- wep-key1:
- description:
- - Index 1 WEP key. This WEP index is not used by most networks.
- - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
- type: str
- wep-key2:
- description:
- - Index 2 WEP key. This WEP index is not used by most networks.
- - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
- type: str
- wep-key3:
- description:
- - Index 3 WEP key. This WEP index is not used by most networks.
- - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
- type: str
- wep-tx-keyidx:
- description:
- - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index
- is used by the AP, put that WEP key index here.
- - Valid values are V(0) (default key) through V(3).
- - Note that some consumer access points (like the Linksys WRT54G) number the keys V(1) to V(4).
- type: int
- choices: [ 0, 1, 2, 3 ]
- default: 0
- wps-method:
- description:
- - Flags indicating which mode of WPS is to be used if any.
- - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS
- enrollment from the Access Point capabilities.
- - WPS can be disabled by setting this property to a value of V(1).
- type: int
- default: 0
- version_added: 3.0.0
- ssid:
- description:
- - Name of the Wireless router or the access point.
- type: str
- version_added: 3.0.0
- wifi:
- description:
- - The configuration of the WiFi connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- - 'An up-to-date list of supported attributes can be found here:
- U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).'
- - 'For instance to create a hidden AP mode WiFi connection:
- V({hidden: true, mode: ap}).'
- type: dict
- suboptions:
- ap-isolation:
- description:
- - Configures AP isolation, which prevents communication between wireless devices connected to this AP.
- - This property can be set to a value different from V(-1) only when the interface is configured in AP mode.
- - If set to V(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
- from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file
- shares, printers, etc.
- - If set to V(0), devices can talk to each other.
- - When set to V(-1), the global default is used; in case the global default is unspecified it is assumed to be V(0).
- type: int
- choices: [ -1, 0, 1 ]
- default: -1
- assigned-mac-address:
- description:
- - The new field for the cloned MAC address.
- - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), V(random) or
- V(stable).
- - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
- - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property C(cloned-mac-address).
- type: str
- band:
- description:
- - 802.11 frequency band of the network.
- - One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11.
- - This will lock associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device will not
- associate with the same network in the 2.4GHz band even if the network's settings are compatible.
- - This setting depends on specific driver capability and may not work with all drivers.
- type: str
- choices: [ a, bg ]
- bssid:
- description:
- - If specified, directs the device to only associate with the given access point.
- - This capability is highly driver dependent and not supported by all devices.
- - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future.
- type: str
- channel:
- description:
- - Wireless channel to use for the Wi-Fi connection.
- - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
- - Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set.
- type: int
- default: 0
- cloned-mac-address:
- description:
- - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying special variants like
- V(random).
- - For libnm and nmcli, this field is called C(cloned-mac-address).
- type: str
- generate-mac-address-mask:
- description:
- - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled and a
- locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed.
- - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
- - If the property is V(null), it is eligible to be overwritten by a default connection setting.
- - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
- - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC
- address of the device, while the unset bits are subject to randomization.
- - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
- V(random) or V(stable) algorithm.
- - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits
- that shall not be randomized.
- - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
- randomized.
- - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
- - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example,
- V(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
- administered.
- type: str
- hidden:
- description:
- - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
- - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID.
- However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with
- caution.
- - In AP mode, the created network does not broadcast its SSID.
- - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the
- explicit probe-scans are distinctly recognizable on the air.
- type: bool
- default: false
- mac-address-blacklist:
- description:
- - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply.
- - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, V(00:11:22:33:44:55)).
- type: list
- elements: str
- mac-address-randomization:
- description:
- - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), V(1)
- (never randomize the MAC address), or V(2) (always randomize the MAC address).
- - This property is deprecated for O(wifi.cloned-mac-address).
- type: int
- default: 0
- choices: [ 0, 1, 2 ]
- mac-address:
- description:
- - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches.
- - This property does not change the MAC address of the device (for example for MAC spoofing).
- type: str
- mode:
- description: Wi-Fi network mode. If blank, V(infrastructure) is assumed.
- type: str
- choices: [ infrastructure, mesh, adhoc, ap ]
- default: infrastructure
- mtu:
- description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
- type: int
- default: 0
- powersave:
- description:
- - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (don't touch currently configure setting) or V(0) (use
- the globally configured value).
- - All other values are reserved.
- type: int
- default: 0
- choices: [ 0, 1, 2, 3 ]
- rate:
- description:
- - If non-zero, directs the device to only use the specified bitrate for communication with the access point.
- - Units are in Kb/s, so for example V(5500) = 5.5 Mbit/s.
- - This property is highly driver dependent and not all devices support setting a static bitrate.
- type: int
- default: 0
- tx-power:
- description:
- - If non-zero, directs the device to use the specified transmit power.
- - Units are dBm.
- - This property is highly driver dependent and not all devices support setting a static transmit power.
- type: int
- default: 0
- wake-on-wlan:
- description:
- - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options.
- - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (V(0x4)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (V(0x10)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (V(0x40)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (V(0x100)) or the special values
- V(0x1) (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
- - Note the option values' sum must be specified in order to combine multiple options.
- type: int
- default: 1
- version_added: 3.5.0
- ignore_unsupported_suboptions:
- description:
- - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host.
- - Only O(wifi) and O(wifi_sec) options are currently affected.
- type: bool
- default: false
- version_added: 3.6.0
- gsm:
- description:
- - The configuration of the GSM connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- - 'An up-to-date list of supported attributes can be found here:
- U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
- - 'For instance to use apn, pin, username and password:
- V({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
- type: dict
- version_added: 3.7.0
- suboptions:
- apn:
- description:
- - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
- - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or
- just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan.
- - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
- type: str
- auto-config:
- description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) will default to values that match the network
- the modem will register to in the Mobile Broadband Provider database.
- type: bool
- default: false
- device-id:
- description:
- - The device unique identifier (as given by the V(WWAN) management service) which this connection applies to.
- - If given, the connection will only apply to the specified device.
- type: str
- home-only:
- description:
- - When V(true), only connections to the home network will be allowed.
- - Connections to roaming networks will not be made.
- type: bool
- default: false
- mtu:
- description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
- type: int
- default: 0
- network-id:
- description:
- - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration.
- - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network.
- - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible.
- type: str
- number:
- description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems.
- type: str
- password:
- description:
- - The password used to authenticate with the network, if required.
- - Many providers do not require a password, or accept any password.
- - But if a password is required, it is specified here.
- type: str
- password-flags:
- description:
- - NMSettingSecretFlags indicating how to handle the O(gsm.password) property.
- - 'Following choices are allowed:
- V(0) B(NONE): The system is responsible for providing and storing this secret (default),
- V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
- asked to retrieve it
- V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
- V(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
- (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
- type: int
- choices: [ 0, 1, 2 , 4 ]
- default: 0
- pin:
- description:
- - If the SIM is locked with a PIN it must be unlocked before any other operations are requested.
- - Specify the PIN here to allow operation of the device.
- type: str
- pin-flags:
- description:
- - NMSettingSecretFlags indicating how to handle the O(gsm.pin) property.
- - See O(gsm.password-flags) for NMSettingSecretFlags choices.
- type: int
- choices: [ 0, 1, 2 , 4 ]
- default: 0
- sim-id:
- description:
- - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
- - 'If given, the connection will apply to any device also allowed by O(gsm.device-id) which contains a SIM card matching
- the given identifier.'
- type: str
- sim-operator-id:
- description:
- - A MCC/MNC string like V(310260) or V(21601I) identifying the specific mobile network operator which this connection applies to.
- - 'If given, the connection will apply to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains a SIM card
- provisioned by the given operator.'
- type: str
- username:
- description:
- - The username used to authenticate with the network, if required.
- - Many providers do not require a username, or accept any username.
- - But if a username is required, it is specified here.
- macvlan:
- description:
- - The configuration of the MAC VLAN connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- - 'An up-to-date list of supported attributes can be found here:
- U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
- type: dict
- version_added: 6.6.0
- suboptions:
- mode:
- description:
- - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
- - 'Following choices are allowed: V(1) B(vepa), V(2) B(bridge), V(3) B(private), V(4) B(passthru)
- and V(5) B(source)'
- type: int
- choices: [ 1, 2, 3, 4, 5 ]
- required: true
- parent:
- description:
- - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
- be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
- "mac-address" property.
- type: str
- required: true
- promiscuous:
- description:
- - Whether the interface should be put in promiscuous mode.
- type: bool
- tap:
- description:
- - Whether the interface should be a MACVTAP.
- type: bool
- wireguard:
- description:
- - The configuration of the Wireguard connection.
- - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- - 'An up-to-date list of supported attributes can be found here:
- U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
- - 'For instance to configure a listen port:
- V({listen-port: 12345}).'
- type: dict
- version_added: 4.3.0
- suboptions:
- fwmark:
- description:
- - The 32-bit fwmark for outgoing packets.
- - The use of fwmark is optional and is by default off. Setting it to 0 disables it.
- - Note that O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark.
- type: int
- ip4-auto-default-route:
- description:
- - Whether to enable special handling of the IPv4 default route.
- - If enabled, the IPv4 default route from O(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy
- routing rules will be added.
- - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen
- automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing"
- type: bool
- ip6-auto-default-route:
- description:
- - Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route.
- type: bool
- listen-port:
- description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the
- interface comes up.
- type: int
- mtu:
- description:
- - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments.
- - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes
- at the time of activation.
- type: int
- peer-routes:
- description:
- - Whether to automatically add routes for the AllowedIPs ranges of the peers.
- - If V(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
- C(ipv6.route-table). Usually you want this automatism enabled.
- - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
- and C(ipv6.routes), respectively.
- - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
- setting is enabled, the peer route for this peer won't be added automatically.
- type: bool
- private-key:
- description: The 256 bit private-key in base64 encoding.
- type: str
- private-key-flags:
- description: C(NMSettingSecretFlags) indicating how to handle the O(wireguard.private-key) property.
- type: int
- choices: [ 0, 1, 2 ]
- vpn:
- description:
- - Configuration of a VPN connection (PPTP and L2TP).
- - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome)
- if host has UI - are installed on the host.
- type: dict
- version_added: 5.1.0
- suboptions:
- permissions:
- description: User that will have permission to use the connection.
- type: str
- required: true
- service-type:
- description: This defines the service type of connection.
- type: str
- required: true
- gateway:
- description: The gateway to connection. It can be an IP address (for example V(192.0.2.1))
- or a FQDN address (for example V(vpn.example.com)).
- type: str
- required: true
- password-flags:
- description:
- - NMSettingSecretFlags indicating how to handle the C(vpn.password) property.
- - 'Following choices are allowed:
- V(0) B(NONE): The system is responsible for providing and storing this secret (default);
- V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
- asked to retrieve it;
- V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
- V(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
- (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
- type: int
- choices: [ 0, 1, 2 , 4 ]
- default: 0
- user:
- description: Username provided by VPN administrator.
- type: str
- required: true
- ipsec-enabled:
- description:
- - Enable or disable IPSec tunnel to L2TP host.
- - This option is need when O(vpn.service-type) is V(org.freedesktop.NetworkManager.l2tp).
- type: bool
- ipsec-psk:
- description:
- - The pre-shared key in base64 encoding.
- - >
- You can encode using this Ansible jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
- - This is only used when O(vpn.ipsec-enabled=true).
- type: str
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# These examples are using the following inventory:
#
# ## Directory layout:
@@ -1168,233 +1262,231 @@ EXAMPLES = r'''
# ```
-
## playbook-add.yml example
----
- hosts: openstack-stage
remote_user: root
tasks:
- - name: Install needed network manager libs
- ansible.builtin.package:
- name:
- - NetworkManager-libnm
- - nm-connection-editor
- - libsemanage-python
- - policycoreutils-python
- state: present
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
##### Working with all cloud nodes - Teaming
- - name: Try nmcli add team - conn_name only & ip4 gw4
- community.general.nmcli:
- type: team
- conn_name: '{{ item.conn_name }}'
- ip4: '{{ item.ip4 }}'
- gw4: '{{ item.gw4 }}'
- state: present
- with_items:
- - '{{ nmcli_team }}'
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
- - name: Try nmcli add teams-slave
- community.general.nmcli:
- type: team-slave
- conn_name: '{{ item.conn_name }}'
- ifname: '{{ item.ifname }}'
- master: '{{ item.master }}'
- state: present
- with_items:
- - '{{ nmcli_team_slave }}'
+ - name: Try nmcli add teams-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
-###### Working with all cloud nodes - Bonding
- - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
- community.general.nmcli:
- type: bond
- conn_name: '{{ item.conn_name }}'
- ip4: '{{ item.ip4 }}'
- gw4: '{{ item.gw4 }}'
- mode: '{{ item.mode }}'
- state: present
- with_items:
- - '{{ nmcli_bond }}'
+##### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
- - name: Try nmcli add bond-slave
- community.general.nmcli:
- type: bond-slave
- conn_name: '{{ item.conn_name }}'
- ifname: '{{ item.ifname }}'
- master: '{{ item.master }}'
- state: present
- with_items:
- - '{{ nmcli_bond_slave }}'
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
##### Working with all cloud nodes - Ethernet
- - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
- community.general.nmcli:
- type: ethernet
- conn_name: '{{ item.conn_name }}'
- ip4: '{{ item.ip4 }}'
- gw4: '{{ item.gw4 }}'
- state: present
- with_items:
- - '{{ nmcli_ethernet }}'
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
## playbook-del.yml example
- hosts: openstack-stage
remote_user: root
tasks:
- - name: Try nmcli del team - multiple
- community.general.nmcli:
- conn_name: '{{ item.conn_name }}'
- state: absent
- with_items:
- - conn_name: em1
- - conn_name: em2
- - conn_name: p1p1
- - conn_name: p1p2
- - conn_name: p2p1
- - conn_name: p2p2
- - conn_name: tenant
- - conn_name: storage
- - conn_name: external
- - conn_name: team-em1
- - conn_name: team-em2
- - conn_name: team-p1p1
- - conn_name: team-p1p2
- - conn_name: team-p2p1
- - conn_name: team-p2p2
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
- - name: Add an Ethernet connection with static IP configuration
- community.general.nmcli:
- conn_name: my-eth1
- ifname: eth1
- type: ethernet
- ip4: 192.0.2.100/24
- gw4: 192.0.2.1
- state: present
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
- - name: Add an Team connection with static IP configuration
- community.general.nmcli:
- conn_name: my-team1
- ifname: my-team1
- type: team
- ip4: 192.0.2.100/24
- gw4: 192.0.2.1
- state: present
- autoconnect: true
+ - name: Add an Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: true
- - name: Optionally, at the same time specify IPv6 addresses for the device
- community.general.nmcli:
- conn_name: my-eth1
- ifname: eth1
- type: ethernet
- ip4: 192.0.2.100/24
- gw4: 192.0.2.1
- ip6: 2001:db8::cafe
- gw6: 2001:db8::1
- state: present
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
- - name: Add two IPv4 DNS server addresses
- community.general.nmcli:
- conn_name: my-eth1
- type: ethernet
- dns4:
- - 192.0.2.53
- - 198.51.100.53
- state: present
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
- - name: Make a profile usable for all compatible Ethernet interfaces
- community.general.nmcli:
- ctype: ethernet
- name: my-eth1
- ifname: '*'
- state: present
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ ctype: ethernet
+ name: my-eth1
+ ifname: '*'
+ state: present
- - name: Change the property of a setting e.g. MTU
- community.general.nmcli:
- conn_name: my-eth1
- mtu: 9000
- type: ethernet
- state: present
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
- - name: Change the property of a setting e.g. MTU and reload connection
- community.general.nmcli:
- conn_name: my-eth1
- mtu: 1500
- type: ethernet
- state: present
- conn_reload: true
+ - name: Change the property of a setting e.g. MTU and reload connection
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 1500
+ type: ethernet
+ state: present
+ conn_reload: true
- - name: Disable connection
- community.general.nmcli:
- conn_name: my-eth1
- state: down
+ - name: Disable connection
+ community.general.nmcli:
+ conn_name: my-eth1
+ state: down
- - name: Reload and enable connection
- community.general.nmcli:
- conn_name: my-eth1
- state: up
- conn_reload: true
+ - name: Reload and enable connection
+ community.general.nmcli:
+ conn_name: my-eth1
+ state: up
+ conn_reload: true
- - name: Add second ip4 address
- community.general.nmcli:
- conn_name: my-eth1
- ifname: eth1
- type: ethernet
- ip4:
- - 192.0.2.100/24
- - 192.0.3.100/24
- state: present
+ - name: Add second ip4 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4:
+ - 192.0.2.100/24
+ - 192.0.3.100/24
+ state: present
- - name: Add second ip6 address
- community.general.nmcli:
- conn_name: my-eth1
- ifname: eth1
- type: ethernet
- ip6:
- - 2001:db8::cafe
- - 2002:db8::cafe
- state: present
+ - name: Add second ip6 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip6:
+ - 2001:db8::cafe
+ - 2002:db8::cafe
+ state: present
- - name: Add VxLan
- community.general.nmcli:
- type: vxlan
- conn_name: vxlan_test1
- vxlan_id: 16
- vxlan_local: 192.168.1.2
- vxlan_remote: 192.168.1.5
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
- - name: Add gre
- community.general.nmcli:
- type: gre
- conn_name: gre_test1
- ip_tunnel_dev: eth0
- ip_tunnel_local: 192.168.1.2
- ip_tunnel_remote: 192.168.1.5
+ - name: Add gre
+ community.general.nmcli:
+ type: gre
+ conn_name: gre_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
- - name: Add ipip
- community.general.nmcli:
- type: ipip
- conn_name: ipip_test1
- ip_tunnel_dev: eth0
- ip_tunnel_local: 192.168.1.2
- ip_tunnel_remote: 192.168.1.5
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
- - name: Add sit
- community.general.nmcli:
- type: sit
- conn_name: sit_test1
- ip_tunnel_dev: eth0
- ip_tunnel_local: 192.168.1.2
- ip_tunnel_remote: 192.168.1.5
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
- - name: Add zone
- community.general.nmcli:
- type: ethernet
- conn_name: my-eth1
- zone: external
- state: present
+ - name: Add zone
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ zone: external
+ state: present
# nmcli exits with status 0 if it succeeds and exits with a status greater
# than zero when there is a failure. The following list of status codes may be
@@ -1442,10 +1534,10 @@ EXAMPLES = r'''
conn_name: my-gsm-provider
ifname: cdc-wdm0
gsm:
- apn: my.provider.apn
- username: my-provider-username
- password: my-provider-password
- pin: my-sim-pin
+ apn: my.provider.apn
+ username: my-provider-username
+ password: my-provider-password
+ pin: my-sim-pin
autoconnect: true
state: present
@@ -1455,8 +1547,8 @@ EXAMPLES = r'''
conn_name: my-macvlan-connection
ifname: mymacvlan0
macvlan:
- mode: 2
- parent: eth1
+ mode: 2
+ parent: eth1
autoconnect: true
state: present
@@ -1466,8 +1558,8 @@ EXAMPLES = r'''
conn_name: my-wg-provider
ifname: mywg0
wireguard:
- listen-port: 51820
- private-key: my-private-key
+ listen-port: 51820
+ private-key: my-private-key
autoconnect: true
state: present
@@ -1478,13 +1570,13 @@ EXAMPLES = r'''
type: vpn
conn_name: my-vpn-connection
vpn:
- permissions: "{{ ansible_user }}"
- service-type: org.freedesktop.NetworkManager.l2tp
- gateway: vpn.example.com
- password-flags: 2
- user: brittany
- ipsec-enabled: true
- ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}"
+ permissions: "{{ ansible_user }}"
+ service-type: org.freedesktop.NetworkManager.l2tp
+ gateway: vpn.example.com
+ password-flags: 2
+ user: brittany
+ ipsec-enabled: true
+ ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}"
autoconnect: false
state: present
@@ -1522,6 +1614,29 @@ EXAMPLES = r'''
vlanid: 5
state: present
+## Creating VRF and adding VLAN interface to it
+- name: Create VRF
+ community.general.nmcli:
+ type: vrf
+ ifname: vrf10
+ table: 10
+ state: present
+ conn_name: vrf10
+ method4: disabled
+ method6: disabled
+
+- name: Create VLAN interface inside VRF
+ community.general.nmcli:
+ conn_name: "eth0.124"
+ type: vlan
+ vlanid: "124"
+ vlandev: "eth0"
+ master: "vrf10"
+ slave_type: vrf
+ state: "present"
+ ip4: '192.168.124.50'
+ gw4: '192.168.124.1'
+
## Defining ip rules while setting a static IP
## table 'production' is set with id 200 in this example.
- name: Set Static ips for interface with ip rules and routes
@@ -1563,7 +1678,7 @@ EXAMPLES = r'''
slave_type: ovs-port
type: ethernet
state: present
-'''
+"""
RETURN = r"""#
"""
@@ -1608,6 +1723,8 @@ class Nmcli(object):
self.state = module.params['state']
self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions']
self.autoconnect = module.params['autoconnect']
+ self.autoconnect_priority = module.params['autoconnect_priority']
+ self.autoconnect_retries = module.params['autoconnect_retries']
self.conn_name = module.params['conn_name']
self.conn_reload = module.params['conn_reload']
self.slave_type = module.params['slave_type']
@@ -1650,6 +1767,7 @@ class Nmcli(object):
self.downdelay = module.params['downdelay']
self.updelay = module.params['updelay']
self.xmit_hash_policy = module.params['xmit_hash_policy']
+ self.fail_over_mac = module.params['fail_over_mac']
self.arp_interval = module.params['arp_interval']
self.arp_ip_target = module.params['arp_ip_target']
self.slavepriority = module.params['slavepriority']
@@ -1687,6 +1805,8 @@ class Nmcli(object):
self.wireguard = module.params['wireguard']
self.vpn = module.params['vpn']
self.transport_mode = module.params['transport_mode']
+ self.infiniband_mac = module.params['infiniband_mac']
+ self.sriov = module.params['sriov']
if self.method4:
self.ipv4_method = self.method4
@@ -1706,6 +1826,9 @@ class Nmcli(object):
else:
self.ipv6_method = None
+ if self.type == "vrf":
+ self.table = module.params['table']
+
self.edit_commands = []
self.extra_options_validation()
@@ -1717,10 +1840,7 @@ class Nmcli(object):
self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
- if isinstance(cmd, list):
- cmd = [to_text(item) for item in cmd]
- else:
- cmd = to_text(cmd)
+ cmd = [to_text(item) for item in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def execute_edit_commands(self, commands, arguments):
@@ -1733,12 +1853,15 @@ class Nmcli(object):
# Options common to multiple connection types.
options = {
'connection.autoconnect': self.autoconnect,
+ 'connection.autoconnect-priority': self.autoconnect_priority,
+ 'connection.autoconnect-retries': self.autoconnect_retries,
'connection.zone': self.zone,
}
# IP address options.
# The ovs-interface type can be both ip_conn_type and have a master
- if (self.ip_conn_type and not self.master) or self.type == "ovs-interface":
+ # An interface that has a master but is of slave type vrf can have an IP address
+ if (self.ip_conn_type and (not self.master or self.slave_type == "vrf")) or self.type == "ovs-interface":
options.update({
'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4),
'ipv4.dhcp-client-id': self.dhcp_client_id,
@@ -1797,6 +1920,7 @@ class Nmcli(object):
'primary': self.primary,
'updelay': self.updelay,
'xmit_hash_policy': self.xmit_hash_policy,
+ 'fail_over_mac': self.fail_over_mac,
})
elif self.type == 'bond-slave':
if self.slave_type and self.slave_type != 'bond':
@@ -1951,6 +2075,19 @@ class Nmcli(object):
options.update({
'infiniband.transport-mode': self.transport_mode,
})
+ if self.infiniband_mac:
+ options['infiniband.mac-address'] = self.infiniband_mac
+ elif self.type == 'vrf':
+ options.update({
+ 'table': self.table,
+ })
+
+ if self.type == 'ethernet':
+ if self.sriov:
+ for name, value in self.sriov.items():
+ options.update({
+ 'sriov.%s' % name: value,
+ })
# Convert settings values based on the situation.
for setting, value in options.items():
@@ -2000,6 +2137,7 @@ class Nmcli(object):
'vpn',
'loopback',
'ovs-interface',
+ 'vrf'
)
@property
@@ -2129,7 +2267,7 @@ class Nmcli(object):
@staticmethod
def settings_type(setting):
- if setting in ('bridge.stp',
+ if setting in {'bridge.stp',
'bridge-port.hairpin-mode',
'connection.autoconnect',
'ipv4.never-default',
@@ -2139,9 +2277,9 @@ class Nmcli(object):
'ipv6.ignore-auto-dns',
'ipv6.ignore-auto-routes',
'802-11-wireless.hidden',
- 'team.runner-fast-rate'):
+ 'team.runner-fast-rate'}:
return bool
- elif setting in ('ipv4.addresses',
+ elif setting in {'ipv4.addresses',
'ipv6.addresses',
'ipv4.dns',
'ipv4.dns-search',
@@ -2158,8 +2296,11 @@ class Nmcli(object):
'802-11-wireless-security.proto',
'802-11-wireless-security.psk-flags',
'802-11-wireless-security.wep-key-flags',
- '802-11-wireless.mac-address-blacklist'):
+ '802-11-wireless.mac-address-blacklist'}:
return list
+ elif setting in {'connection.autoconnect-priority',
+ 'connection.autoconnect-retries'}:
+ return int
return str
def get_route_params(self, raw_values):
@@ -2237,6 +2378,9 @@ class Nmcli(object):
if key == 'xmit_hash_policy':
cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value])
continue
+ if key == 'fail_over_mac':
+ cmd.extend(['+bond.options', 'fail_over_mac=%s' % value])
+ continue
cmd.extend([key, value])
return self.execute_command(cmd)
@@ -2338,7 +2482,7 @@ class Nmcli(object):
for line in out.splitlines():
prefix = '%s.' % setting
- if (line.startswith(prefix)):
+ if line.startswith(prefix):
pair = line.split(':', 1)
property = pair[0].strip().replace(prefix, '')
properties.append(property)
@@ -2466,11 +2610,13 @@ def main():
argument_spec=dict(
ignore_unsupported_suboptions=dict(type='bool', default=False),
autoconnect=dict(type='bool', default=True),
+ autoconnect_priority=dict(type='int'),
+ autoconnect_retries=dict(type='int'),
state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']),
conn_name=dict(type='str', required=True),
conn_reload=dict(type='bool', default=False),
master=dict(type='str'),
- slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port']),
+ slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port', 'vrf']),
ifname=dict(type='str'),
type=dict(type='str',
choices=[
@@ -2498,6 +2644,7 @@ def main():
'ovs-interface',
'ovs-bridge',
'ovs-port',
+ 'vrf',
]),
ip4=dict(type='list', elements='str'),
gw4=dict(type='str'),
@@ -2555,6 +2702,7 @@ def main():
downdelay=dict(type='int'),
updelay=dict(type='int'),
xmit_hash_policy=dict(type='str'),
+ fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']),
arp_interval=dict(type='int'),
arp_ip_target=dict(type='str'),
primary=dict(type='str'),
@@ -2608,12 +2756,21 @@ def main():
tap=dict(type='bool'))),
wireguard=dict(type='dict'),
vpn=dict(type='dict'),
+ sriov=dict(type='dict'),
+ table=dict(type='int'),
+ # InfiniBand-specific options
transport_mode=dict(type='str', choices=['datagram', 'connected']),
+ infiniband_mac=dict(type='str'),
+
),
mutually_exclusive=[['never_default4', 'gw4'],
['routes4_extended', 'routes4'],
['routes6_extended', 'routes6']],
- required_if=[("type", "wifi", [("ssid")])],
+ required_if=[
+ ("type", "wifi", ["ssid"]),
+ ("type", "team-slave", ["master", "ifname"]),
+ ("slave_type", "team", ["master", "ifname"]),
+ ],
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
@@ -2623,21 +2780,12 @@ def main():
(rc, out, err) = (None, '', '')
result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
- # check for issues
- if nmcli.conn_name is None:
- nmcli.module.fail_json(msg="Please specify a name for the connection")
# team checks
if nmcli.type == "team":
if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup":
nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup")
if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
- # team-slave checks
- if nmcli.type == 'team-slave' or nmcli.slave_type == 'team':
- if nmcli.master is None:
- nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
- if nmcli.ifname is None:
- nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
if nmcli.type == 'wifi':
unsupported_properties = {}
if nmcli.wifi:
@@ -2660,7 +2808,7 @@ def main():
(rc, out, err) = nmcli.down_connection()
(rc, out, err) = nmcli.remove_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state == 'present':
if nmcli.connection_exists():
@@ -2697,7 +2845,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.up_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state == 'down':
if nmcli.connection_exists():
@@ -2707,7 +2855,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.down_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc)
except NmcliModuleError as e:
module.fail_json(name=nmcli.conn_name, msg=str(e))
diff --git a/plugins/modules/nomad_job.py b/plugins/modules/nomad_job.py
index 87e8ec04ca..9b3a670cd9 100644
--- a/plugins/modules/nomad_job.py
+++ b/plugins/modules/nomad_job.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nomad_job
author: FERREIRA Christophe (@chris93111)
version_added: "1.3.0"
@@ -17,53 +16,53 @@ short_description: Launch a Nomad Job
description:
- Launch a Nomad job.
- Stop a Nomad job.
- - Force start a Nomad job
+ - Force start a Nomad job.
requirements:
- python-nomad
extends_documentation_fragment:
- community.general.nomad
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of job for delete, stop and start job without source.
- - Name of job for delete, stop and start job without source.
- - Either this or O(content) must be specified.
- type: str
- state:
- description:
- - Deploy or remove job.
- choices: ["present", "absent"]
- required: true
- type: str
- force_start:
- description:
- - Force job to started.
- type: bool
- default: false
- content:
- description:
- - Content of Nomad job.
- - Either this or O(name) must be specified.
- type: str
- content_format:
- description:
- - Type of content of Nomad job.
- choices: ["hcl", "json"]
- default: hcl
- type: str
+ name:
+ description:
+ - Name of job for delete, stop and start job without source.
+ - This name identifies the job when O(content) is not provided.
+ - Either this or O(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+ - Force job to started.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or O(name) must be specified.
+ type: str
+ content_format:
+ description:
+ - Type of content of Nomad job.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
seealso:
- name: Nomad jobs documentation
description: Complete documentation for Nomad API jobs.
link: https://www.nomadproject.io/api-docs/jobs/
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create job
community.general.nomad_job:
host: localhost
@@ -92,7 +91,7 @@ EXAMPLES = '''
name: api
timeout: 120
force_start: true
-'''
+"""
import json
diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py
index bd7cf8ca98..b3703b64ce 100644
--- a/plugins/modules/nomad_job_info.py
+++ b/plugins/modules/nomad_job_info.py
@@ -8,15 +8,14 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nomad_job_info
author: FERREIRA Christophe (@chris93111)
version_added: "1.3.0"
short_description: Get Nomad Jobs info
description:
- - Get info for one Nomad job.
- - List Nomad jobs.
+ - Get info for one Nomad job.
+ - List Nomad jobs.
requirements:
- python-nomad
extends_documentation_fragment:
@@ -24,18 +23,18 @@ extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
options:
- name:
- description:
- - Name of job for Get info.
- - If not specified, lists all jobs.
- type: str
+ name:
+ description:
+ - Name of job for Get info.
+ - If not specified, lists all jobs.
+ type: str
seealso:
- name: Nomad jobs documentation
description: Complete documentation for Nomad API jobs.
link: https://www.nomadproject.io/api-docs/jobs/
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get info for job awx
community.general.nomad_job_info:
host: localhost
@@ -46,225 +45,224 @@ EXAMPLES = '''
community.general.nomad_job_info:
host: localhost
register: result
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
result:
- description: List with dictionary contains jobs info
- returned: success
- type: list
- sample: [
- {
+ description: List with dictionary contains jobs info.
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
"Affinities": null,
- "AllAtOnce": false,
"Constraints": null,
- "ConsulToken": "",
- "CreateIndex": 13,
- "Datacenters": [
- "dc1"
- ],
- "Dispatched": false,
- "ID": "example",
- "JobModifyIndex": 13,
- "Meta": null,
- "ModifyIndex": 13,
- "Multiregion": null,
- "Name": "example",
- "Namespace": "default",
- "NomadTokenID": "",
- "ParameterizedJob": null,
- "ParentID": "",
- "Payload": null,
- "Periodic": null,
- "Priority": 50,
- "Region": "global",
- "Spreads": null,
- "Stable": false,
- "Status": "pending",
- "StatusDescription": "",
- "Stop": false,
- "SubmitTime": 1602244370615307000,
- "TaskGroups": [
- {
- "Affinities": null,
- "Constraints": null,
- "Count": 1,
- "EphemeralDisk": {
- "Migrate": false,
- "SizeMB": 300,
- "Sticky": false
- },
- "Meta": null,
- "Migrate": {
- "HealthCheck": "checks",
- "HealthyDeadline": 300000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000
- },
- "Name": "cache",
- "Networks": null,
- "ReschedulePolicy": {
- "Attempts": 0,
- "Delay": 30000000000,
- "DelayFunction": "exponential",
- "Interval": 0,
- "MaxDelay": 3600000000000,
- "Unlimited": true
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Scaling": null,
- "Services": null,
- "ShutdownDelay": null,
- "Spreads": null,
- "StopAfterClientDisconnect": null,
- "Tasks": [
- {
- "Affinities": null,
- "Artifacts": null,
- "CSIPluginConfig": null,
- "Config": {
- "image": "redis:3.2",
- "port_map": [
- {
- "db": 6379.0
- }
- ]
- },
- "Constraints": null,
- "DispatchPayload": null,
- "Driver": "docker",
- "Env": null,
- "KillSignal": "",
- "KillTimeout": 5000000000,
- "Kind": "",
- "Leader": false,
- "Lifecycle": null,
- "LogConfig": {
- "MaxFileSizeMB": 10,
- "MaxFiles": 10
- },
- "Meta": null,
- "Name": "redis",
- "Resources": {
- "CPU": 500,
- "Devices": null,
- "DiskMB": 0,
- "IOPS": 0,
- "MemoryMB": 256,
- "Networks": [
- {
- "CIDR": "",
- "DNS": null,
- "Device": "",
- "DynamicPorts": [
- {
- "HostNetwork": "default",
- "Label": "db",
- "To": 0,
- "Value": 0
- }
- ],
- "IP": "",
- "MBits": 10,
- "Mode": "",
- "ReservedPorts": null
- }
- ]
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Services": [
- {
- "AddressMode": "auto",
- "CanaryMeta": null,
- "CanaryTags": null,
- "Checks": [
- {
- "AddressMode": "",
- "Args": null,
- "CheckRestart": null,
- "Command": "",
- "Expose": false,
- "FailuresBeforeCritical": 0,
- "GRPCService": "",
- "GRPCUseTLS": false,
- "Header": null,
- "InitialStatus": "",
- "Interval": 10000000000,
- "Method": "",
- "Name": "alive",
- "Path": "",
- "PortLabel": "",
- "Protocol": "",
- "SuccessBeforePassing": 0,
- "TLSSkipVerify": false,
- "TaskName": "",
- "Timeout": 2000000000,
- "Type": "tcp"
- }
- ],
- "Connect": null,
- "EnableTagOverride": false,
- "Meta": null,
- "Name": "redis-cache",
- "PortLabel": "db",
- "Tags": [
- "global",
- "cache"
- ],
- "TaskName": ""
- }
- ],
- "ShutdownDelay": 0,
- "Templates": null,
- "User": "",
- "Vault": null,
- "VolumeMounts": null
- }
- ],
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "checks",
- "HealthyDeadline": 180000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000,
- "ProgressDeadline": 600000000000,
- "Stagger": 30000000000
- },
- "Volumes": null
- }
- ],
- "Type": "service",
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "",
- "HealthyDeadline": 0,
- "MaxParallel": 1,
- "MinHealthyTime": 0,
- "ProgressDeadline": 0,
- "Stagger": 30000000000
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
},
- "VaultNamespace": "",
- "VaultToken": "",
- "Version": 0
- }
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
]
-
-'''
+"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py
index 51a2f97163..c189bf4b85 100644
--- a/plugins/modules/nomad_token.py
+++ b/plugins/modules/nomad_token.py
@@ -9,60 +9,59 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nomad_token
author: Pedro Nascimento (@apecnascimento)
version_added: "8.1.0"
short_description: Manage Nomad ACL tokens
description:
- - This module allows to create Bootstrap tokens, create ACL tokens, update ACL tokens, and delete ACL tokens.
+ - This module allows to create Bootstrap tokens, create ACL tokens, update ACL tokens, and delete ACL tokens.
requirements:
- - python-nomad
+ - python-nomad
extends_documentation_fragment:
- - community.general.nomad
- - community.general.attributes
+ - community.general.nomad
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of ACL token to create.
- type: str
- token_type:
- description:
- - The type of the token can be V(client), V(management), or V(bootstrap).
- choices: ["client", "management", "bootstrap"]
- type: str
- default: "client"
- policies:
- description:
- - A list of the policies assigned to the token.
- type: list
- elements: str
- default: []
- global_replicated:
- description:
- - Indicates whether or not the token was created with the C(--global).
- type: bool
- default: false
- state:
- description:
- - Create or remove ACL token.
- choices: ["present", "absent"]
- required: true
- type: str
+ name:
+ description:
+ - Name of ACL token to create.
+ type: str
+ token_type:
+ description:
+ - The type of the token can be V(client), V(management), or V(bootstrap).
+ choices: ["client", "management", "bootstrap"]
+ type: str
+ default: "client"
+ policies:
+ description:
+ - A list of the policies assigned to the token.
+ type: list
+ elements: str
+ default: []
+ global_replicated:
+ description:
+ - Indicates whether or not the token was created with the C(--global).
+ type: bool
+ default: false
+ state:
+ description:
+ - Create or remove ACL token.
+ choices: ["present", "absent"]
+ required: true
+ type: str
seealso:
- name: Nomad ACL documentation
description: Complete documentation for Nomad API ACL.
link: https://developer.hashicorp.com/nomad/api-docs/acl/tokens
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create boostrap token
community.general.nomad_token:
host: localhost
@@ -75,7 +74,7 @@ EXAMPLES = '''
name: "Dev token"
token_type: client
policies:
- - readonly
+ - readonly
global_replicated: false
state: absent
@@ -85,8 +84,8 @@ EXAMPLES = '''
name: "Dev token"
token_type: client
policies:
- - readonly
- - devpolicy
+ - readonly
+ - devpolicy
global_replicated: false
state: absent
@@ -95,31 +94,32 @@ EXAMPLES = '''
host: localhost
name: "Dev token"
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
result:
- description: Result returned by nomad.
- returned: always
- type: dict
- sample: {
- "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
- "create_index": 14,
- "create_time": "2023-11-12T18:48:34.248857001Z",
- "expiration_time": null,
- "expiration_ttl": "",
- "global": true,
- "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
- "modify_index": 836,
- "name": "devs",
- "policies": [
- "readonly"
- ],
- "roles": null,
- "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
- "type": "client"
+ description: Result returned by nomad.
+ returned: always
+ type: dict
+ sample:
+ {
+ "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
+ "create_index": 14,
+ "create_time": "2023-11-12T18:48:34.248857001Z",
+ "expiration_time": null,
+ "expiration_ttl": "",
+ "global": true,
+ "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
+ "modify_index": 836,
+ "name": "devs",
+ "policies": [
+ "readonly"
+ ],
+ "roles": null,
+ "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
+ "type": "client"
}
-'''
+"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py
index 0e03142d81..7cd4f4ad66 100644
--- a/plugins/modules/nosh.py
+++ b/plugins/modules/nosh.py
@@ -9,67 +9,61 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nosh
author:
- - "Thomas Caravia (@tacatac)"
+ - "Thomas Caravia (@tacatac)"
short_description: Manage services with nosh
description:
- - Control running and enabled state for system-wide or user services.
- - BSD and Linux systems are supported.
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: str
- required: true
- description:
- - Name of the service to manage.
- state:
- type: str
- required: false
- choices: [ started, stopped, reset, restarted, reloaded ]
- description:
- - V(started)/V(stopped) are idempotent actions that will not run
- commands unless necessary.
- V(restarted) will always bounce the service.
- V(reloaded) will send a SIGHUP or start the service.
- V(reset) will start or stop the service according to whether it is
- enabled or not.
- enabled:
- required: false
- type: bool
- description:
- - Enable or disable the service, independently of C(*.preset) file
- preference or running state. Mutually exclusive with O(preset). Will take
- effect prior to O(state=reset).
- preset:
- required: false
- type: bool
- description:
- - Enable or disable the service according to local preferences in C(*.preset) files.
- Mutually exclusive with O(enabled). Only has an effect if set to true. Will take
- effect prior to O(state=reset).
- user:
- required: false
- default: false
- type: bool
- description:
- - Run system-control talking to the calling user's service manager, rather than
- the system-wide service manager.
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [started, stopped, reset, restarted, reloaded]
+ description:
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service.
+ - V(reloaded) sends a SIGHUP or starts the service.
+ - V(reset) starts or stops the service according to whether it is enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with
+ O(preset). It takes effect prior to O(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled).
+ Only has an effect if set to true. It takes effect prior to O(state=reset).
+ user:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than the system-wide service manager.
requirements:
- - A system with an active nosh service manager, see Notes for further information.
+ - A system with an active nosh service manager, see Notes for further information.
notes:
- - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
-'''
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Start dnscache if not running
community.general.nosh:
name: dnscache
@@ -122,215 +116,216 @@ EXAMPLES = '''
ansible.builtin.fail:
msg: "The {{ result.name }} service is running"
when: result.status and result.status['DaemontoolsEncoreState'] == "running"
-'''
+"""
-RETURN = '''
+RETURN = r"""
name:
- description: name used to find the service
- returned: success
- type: str
- sample: "sshd"
+ description: Name used to find the service.
+ returned: success
+ type: str
+ sample: "sshd"
service_path:
- description: resolved path for the service
- returned: success
- type: str
- sample: "/var/sv/sshd"
+ description: Resolved path for the service.
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
enabled:
- description: whether the service is enabled at system bootstrap
- returned: success
- type: bool
- sample: true
+ description: Whether the service is enabled at system bootstrap.
+ returned: success
+ type: bool
+ sample: true
preset:
- description: whether the enabled status reflects the one set in the relevant C(*.preset) file
- returned: success
- type: bool
- sample: 'False'
+ description: Whether the enabled status reflects the one set in the relevant C(*.preset) file.
+ returned: success
+ type: bool
+ sample: 'False'
state:
- description: service process run state, V(none) if the service is not loaded and will not be started
- returned: if state option is used
- type: str
- sample: "reloaded"
+ description: Service process run state, V(none) if the service is not loaded and will not be started.
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
status:
- description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is not loaded
- returned: success
- type: complex
- contains:
- After:
- description: [] # FIXME
- returned: success
- type: list
- sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
- Before:
- description: [] # FIXME
- returned: success
- type: list
- sample: ["/etc/service-bundles/targets/shutdown"]
- Conflicts:
- description: [] # FIXME
- returned: success
- type: list
- sample: []
- DaemontoolsEncoreState:
- description: [] # FIXME
- returned: success
- type: str
- sample: "running"
- DaemontoolsState:
- description: [] # FIXME
- returned: success
- type: str
- sample: "up"
- Enabled:
- description: [] # FIXME
- returned: success
- type: bool
- sample: true
- LogService:
- description: [] # FIXME
- returned: success
- type: str
- sample: "../cyclog@sshd"
- MainPID:
- description: [] # FIXME
- returned: success
- type: int
- sample: 661
- Paused:
- description: [] # FIXME
- returned: success
- type: bool
- sample: 'False'
- ReadyAfterRun:
- description: [] # FIXME
- returned: success
- type: bool
- sample: 'False'
- RemainAfterExit:
- description: [] # FIXME
- returned: success
- type: bool
- sample: 'False'
- Required-By:
- description: [] # FIXME
- returned: success
- type: list
- sample: []
- RestartExitStatusCode:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- RestartExitStatusNumber:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- RestartTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 4611686019935648081
- RestartUTCTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1508260140
- RunExitStatusCode:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- RunExitStatusNumber:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- RunTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 4611686019935648081
- RunUTCTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1508260140
- StartExitStatusCode:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1
- StartExitStatusNumber:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- StartTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 4611686019935648081
- StartUTCTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1508260140
- StopExitStatusCode:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- StopExitStatusNumber:
- description: [] # FIXME
- returned: success
- type: int
- sample: '0'
- StopTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 4611686019935648081
- StopUTCTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1508260140
- Stopped-By:
- description: [] # FIXME
- returned: success
- type: list
- sample: ["/etc/service-bundles/targets/shutdown"]
- Timestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 4611686019935648081
- UTCTimestamp:
- description: [] # FIXME
- returned: success
- type: int
- sample: 1508260140
- Want:
- description: [] # FIXME
- returned: success
- type: str
- sample: "nothing"
- Wanted-By:
- description: [] # FIXME
- returned: success
- type: list
- sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
- Wants:
- description: [] # FIXME
- returned: success
- type: list
- sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+ description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is
+ not loaded.
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: []
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: true
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: []
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server", "/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys"]
user:
- description: whether the user-level service manager is called
- returned: success
- type: bool
- sample: false
-'''
+ description: Whether the user-level service manager is called.
+ returned: success
+ type: bool
+ sample: false
+"""
import json
diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py
index a906b2c127..25b116f2e8 100644
--- a/plugins/modules/npm.py
+++ b/plugins/modules/npm.py
@@ -8,8 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: npm
short_description: Manage node.js packages with npm
description:
@@ -83,7 +82,7 @@ options:
required: false
type: str
default: present
- choices: [ "present", "absent", "latest" ]
+ choices: ["present", "absent", "latest"]
no_optional:
description:
- Use the C(--no-optional) flag when installing.
@@ -103,10 +102,10 @@ options:
default: false
version_added: 9.5.0
requirements:
- - npm installed in bin path (recommended /usr/local/bin)
-'''
+ - npm installed in bin path (recommended /usr/local/bin)
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Install "coffee-script" node.js package.
community.general.npm:
name: coffee-script
@@ -153,7 +152,7 @@ EXAMPLES = r'''
path: /app/location
executable: /opt/nvm/v0.10.1/bin/npm
state: present
-'''
+"""
import json
import os
diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py
index c9a6ba2133..4049996ca3 100644
--- a/plugins/modules/nsupdate.py
+++ b/plugins/modules/nsupdate.py
@@ -14,89 +14,87 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: nsupdate
short_description: Manage DNS records
description:
- - Create, update and remove DNS records using DDNS updates
+ - Create, update and remove DNS records using DDNS updates.
requirements:
- dnspython
author: "Loic Blot (@nerzhul)"
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Manage DNS record.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- server:
- description:
- - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
- required: true
- type: str
- port:
- description:
- - Use this TCP port when connecting to O(server).
- default: 53
- type: int
- key_name:
- description:
- - Use TSIG key name to authenticate against DNS O(server)
- type: str
- key_secret:
- description:
- - Use TSIG key secret, associated with O(key_name), to authenticate against O(server)
- type: str
- key_algorithm:
- description:
- - Specify key algorithm used by O(key_secret).
- choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
- 'hmac-sha512']
- default: 'hmac-md5'
- type: str
- zone:
- description:
- - DNS record will be modified on this O(zone).
- - When omitted DNS will be queried to attempt finding the correct zone.
- type: str
- record:
- description:
- - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
- required: true
- type: str
- type:
- description:
- - Sets the record type.
- default: 'A'
- type: str
- ttl:
- description:
- - Sets the record TTL.
- default: 3600
- type: int
- value:
- description:
- - Sets the record value.
- type: list
- elements: str
- protocol:
- description:
- - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option.
- default: 'tcp'
- choices: ['tcp', 'udp']
- type: str
-'''
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ type: str
+ port:
+ description:
+ - Use this TCP port when connecting to O(server).
+ default: 53
+ type: int
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS O(server).
+ type: str
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with O(key_name), to authenticate against O(server).
+ type: str
+ key_algorithm:
+ description:
+ - Specify key algorithm used by O(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+ default: 'hmac-md5'
+ type: str
+ zone:
+ description:
+ - DNS record is modified on this O(zone).
+ - When omitted, DNS is queried to attempt finding the correct zone.
+ type: str
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type: str
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ type: str
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Sets the record value.
+ type: list
+ elements: str
+ protocol:
+ description:
+ - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+ type: str
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Add or modify ansible.example.org A to 192.168.1.1"
community.general.nsupdate:
key_name: "nsupdate"
@@ -143,49 +141,45 @@ EXAMPLES = '''
record: "1.1.168.192.in-addr.arpa."
type: "PTR"
state: absent
-'''
+"""
-RETURN = '''
-changed:
- description: If module has modified record
- returned: success
- type: str
+RETURN = r"""
record:
- description: DNS record
- returned: success
- type: str
- sample: 'ansible'
+ description: DNS record.
+ returned: success
+ type: str
+ sample: 'ansible'
ttl:
- description: DNS record TTL
- returned: success
- type: int
- sample: 86400
+ description: DNS record TTL.
+ returned: success
+ type: int
+ sample: 86400
type:
- description: DNS record type
- returned: success
- type: str
- sample: 'CNAME'
+ description: DNS record type.
+ returned: success
+ type: str
+ sample: 'CNAME'
value:
- description: DNS record value(s)
- returned: success
- type: list
- sample: '192.168.1.1'
+ description: DNS record value(s).
+ returned: success
+ type: list
+ sample: '192.168.1.1'
zone:
- description: DNS record zone
- returned: success
- type: str
- sample: 'example.org.'
+ description: DNS record zone.
+ returned: success
+ type: str
+ sample: 'example.org.'
dns_rc:
- description: dnspython return code
- returned: always
- type: int
- sample: 4
+ description: C(dnspython) return code.
+ returned: always
+ type: int
+ sample: 4
dns_rc_str:
- description: dnspython return code (string representation)
- returned: always
- type: str
- sample: 'REFUSED'
-'''
+ description: C(dnspython) return code (string representation).
+ returned: always
+ type: str
+ sample: 'REFUSED'
+"""
import traceback
@@ -478,18 +472,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
server=dict(required=True, type='str'),
- port=dict(required=False, default=53, type='int'),
- key_name=dict(required=False, type='str'),
- key_secret=dict(required=False, type='str', no_log=True),
- key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
- zone=dict(required=False, default=None, type='str'),
+ port=dict(default=53, type='int'),
+ key_name=dict(type='str'),
+ key_secret=dict(type='str', no_log=True),
+ key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(type='str'),
record=dict(required=True, type='str'),
- type=dict(required=False, default='A', type='str'),
- ttl=dict(required=False, default=3600, type='int'),
- value=dict(required=False, default=None, type='list', elements='str'),
- protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ type=dict(default='A', type='str'),
+ ttl=dict(default=3600, type='int'),
+ value=dict(type='list', elements='str'),
+ protocol=dict(default='tcp', choices=['tcp', 'udp'], type='str')
),
supports_check_mode=True
)
diff --git a/plugins/modules/ocapi_command.py b/plugins/modules/ocapi_command.py
index b6b9b6b98e..39269c99cb 100644
--- a/plugins/modules/ocapi_command.py
+++ b/plugins/modules/ocapi_command.py
@@ -8,14 +8,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ocapi_command
version_added: 6.3.0
short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
description:
- - Builds OCAPI URIs locally and sends them to remote OOB controllers to
- perform an action.
+ - Builds OCAPI URIs locally and sends them to remote OOB controllers to perform an action.
- Manages OOB controller such as Indicator LED, Reboot, Power Mode, Firmware Update.
extends_documentation_fragment:
- community.general.attributes
@@ -41,7 +39,7 @@ options:
- Base URI of OOB controller.
type: str
proxy_slot_number:
- description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
+ description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
type: int
update_image_path:
required: false
@@ -70,104 +68,104 @@ options:
type: int
author: "Mike Moerk (@mikemoerk)"
-'''
+"""
-EXAMPLES = '''
- - name: Set the power state to low
- community.general.ocapi_command:
- category: Chassis
- command: PowerModeLow
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
+EXAMPLES = r"""
+- name: Set the power state to low
+ community.general.ocapi_command:
+ category: Chassis
+ command: PowerModeLow
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
- - name: Set the power state to normal
- community.general.ocapi_command:
- category: Chassis
- command: PowerModeNormal
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Set chassis indicator LED to on
- community.general.ocapi_command:
- category: Chassis
- command: IndicatorLedOn
- baseuri: "{{ baseuri }}"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Set chassis indicator LED to off
- community.general.ocapi_command:
- category: Chassis
- command: IndicatorLedOff
- baseuri: "{{ baseuri }}"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Reset Enclosure
- community.general.ocapi_command:
- category: Systems
- command: PowerGracefulRestart
- baseuri: "{{ baseuri }}"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Firmware Upload
- community.general.ocapi_command:
- category: Update
- command: FWUpload
- baseuri: "iom1.wdc.com"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- update_image_path: "/path/to/firmware.tar.gz"
- - name: Firmware Update
- community.general.ocapi_command:
- category: Update
- command: FWUpdate
- baseuri: "iom1.wdc.com"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Firmware Activate
- community.general.ocapi_command:
- category: Update
- command: FWActivate
- baseuri: "iom1.wdc.com"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
- - name: Delete Job
- community.general.ocapi_command:
- category: Jobs
- command: DeleteJob
- job_name: FirmwareUpdate
- baseuri: "{{ baseuri }}"
- proxy_slot_number: 2
- username: "{{ username }}"
- password: "{{ password }}"
-'''
+- name: Set the power state to normal
+ community.general.ocapi_command:
+ category: Chassis
+ command: PowerModeNormal
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Set chassis indicator LED to on
+ community.general.ocapi_command:
+ category: Chassis
+ command: IndicatorLedOn
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Set chassis indicator LED to off
+ community.general.ocapi_command:
+ category: Chassis
+ command: IndicatorLedOff
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Reset Enclosure
+ community.general.ocapi_command:
+ category: Systems
+ command: PowerGracefulRestart
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Firmware Upload
+ community.general.ocapi_command:
+ category: Update
+ command: FWUpload
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_path: "/path/to/firmware.tar.gz"
+- name: Firmware Update
+ community.general.ocapi_command:
+ category: Update
+ command: FWUpdate
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Firmware Activate
+ community.general.ocapi_command:
+ category: Update
+ command: FWActivate
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+- name: Delete Job
+ community.general.ocapi_command:
+ category: Jobs
+ command: DeleteJob
+ job_name: FirmwareUpdate
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message with action result or error description.
- returned: always
- type: str
- sample: "Action was successful"
+ description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
jobUri:
- description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware Activate.
- returned: when supported
- type: str
- sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/"
+ description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware
+ Activate.
+ returned: when supported
+ type: str
+ sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/"
operationStatusId:
- description: OCAPI State ID (see OCAPI documentation for possible values).
- returned: when supported
- type: int
- sample: 2
-
-'''
+ description: OCAPI State ID (see OCAPI documentation for possible values).
+ returned: when supported
+ type: int
+ sample: 2
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py
index 9906d804c1..150b3ad7e2 100644
--- a/plugins/modules/ocapi_info.py
+++ b/plugins/modules/ocapi_info.py
@@ -10,14 +10,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ocapi_info
version_added: 6.3.0
short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
description:
- - Builds OCAPI URIs locally and sends them to remote OOB controllers to
- get information back.
+ - Builds OCAPI URIs locally and sends them to remote OOB controllers to get information back.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -38,7 +36,7 @@ options:
- Base URI of OOB controller.
type: str
proxy_slot_number:
- description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
+ description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
type: int
username:
required: true
@@ -62,84 +60,86 @@ options:
author: "Mike Moerk (@mikemoerk)"
-'''
+"""
-EXAMPLES = '''
- - name: Get job status
- community.general.ocapi_info:
- category: Status
- command: JobStatus
- baseuri: "http://iom1.wdc.com"
- jobName: FirmwareUpdate
- username: "{{ username }}"
- password: "{{ password }}"
-'''
+EXAMPLES = r"""
+- name: Get job status
+ community.general.ocapi_info:
+ category: Status
+ command: JobStatus
+ baseuri: "http://iom1.wdc.com"
+ jobName: FirmwareUpdate
+ username: "{{ username }}"
+ password: "{{ password }}"
+"""
-RETURN = '''
+RETURN = r"""
msg:
- description: Message with action result or error description.
- returned: always
- type: str
- sample: "Action was successful"
+ description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
percentComplete:
- description: Percent complete of the relevant operation. Applies to O(command=JobStatus).
- returned: when supported
- type: int
- sample: 99
+ description: Percent complete of the relevant operation. Applies to O(command=JobStatus).
+ returned: when supported
+ type: int
+ sample: 99
operationStatus:
- description: Status of the relevant operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
- returned: when supported
- type: str
- sample: "Activate needed"
+ description: Status of the relevant operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
+ returned: when supported
+ type: str
+ sample: "Activate needed"
operationStatusId:
- description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation for details.
- returned: when supported
- type: int
- sample: 65540
+    description: Integer value of status (corresponds to RV(operationStatus)). Applies to O(command=JobStatus). See OCAPI documentation
+ for details.
+ returned: when supported
+ type: int
+ sample: 65540
operationHealth:
- description: Health of the operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
- returned: when supported
- type: str
- sample: "OK"
+ description: Health of the operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
+ returned: when supported
+ type: str
+ sample: "OK"
operationHealthId:
- description: >
- Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus).
- See OCAPI documentation for details.
- returned: when supported
- type: str
- sample: "OK"
+ description: >-
+ Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus). See OCAPI
+ documentation for details.
+ returned: when supported
+ type: str
+ sample: "OK"
details:
- description: Details of the relevant operation. Applies to O(command=JobStatus).
- returned: when supported
- type: list
- elements: str
+ description: Details of the relevant operation. Applies to O(command=JobStatus).
+ returned: when supported
+ type: list
+ elements: str
status:
- description: Dictionary containing status information. See OCAPI documentation for details.
- returned: when supported
- type: dict
- sample: {
- "Details": [
- "None"
- ],
- "Health": [
- {
- "ID": 5,
- "Name": "OK"
- }
- ],
- "State": {
- "ID": 16,
- "Name": "In service"
- }
- }
-'''
+ description: Dictionary containing status information. See OCAPI documentation for details.
+ returned: when supported
+ type: dict
+ sample:
+ {
+ "Details": [
+ "None"
+ ],
+ "Health": [
+ {
+ "ID": 5,
+ "Name": "OK"
+ }
+ ],
+ "State": {
+ "ID": 16,
+ "Name": "In service"
+ }
+ }
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py
index bf110b94b5..56a637ac2c 100644
--- a/plugins/modules/oci_vcn.py
+++ b/plugins/modules/oci_vcn.py
@@ -8,49 +8,46 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oci_vcn
short_description: Manage Virtual Cloud Networks(VCN) in OCI
description:
- - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI.
- The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
- U(https://github.com/oracle/oci-ansible-modules/releases).
+ - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud
+ Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases).
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- cidr_block:
- description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present).
- type: str
- required: false
- compartment_id:
- description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present).
- This option is mutually exclusive with O(vcn_id).
- type: str
- display_name:
- description: A user-friendly name. Does not have to be unique, and it's changeable.
- type: str
- aliases: [ 'name' ]
- dns_label:
- description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
- form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
- bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
- to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
- with a letter. The value cannot be changed.
- type: str
- state:
- description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN.
- type: str
- default: present
- choices: ['present', 'absent']
- vcn_id:
- description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN
- with O(state=present). This option is mutually exclusive with O(compartment_id).
- type: str
- aliases: [ 'id' ]
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This
+ option is mutually exclusive with O(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it is changeable.
+ type: str
+ aliases: ['name']
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully
+ qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)).
+ Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric
+ string that begins with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present).
+ This option is mutually exclusive with O(compartment_id).
+ type: str
+ aliases: ['id']
author: "Rohit Chaware (@rohitChaware)"
extends_documentation_fragment:
- community.general.oracle
@@ -58,10 +55,9 @@ extends_documentation_fragment:
- community.general.oracle_wait_options
- community.general.oracle_tags
- community.general.attributes
+"""
-'''
-
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a VCN
community.general.oci_vcn:
cidr_block: '10.0.0.0/16'
@@ -80,24 +76,25 @@ EXAMPLES = """
state: absent
"""
-RETURN = """
+RETURN = r"""
vcn:
- description: Information about the VCN
- returned: On successful create and update operation
- type: dict
- sample: {
- "cidr_block": "10.0.0.0/16",
- compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
- "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
- "display_name": "ansible_vcn",
- "dns_label": "ansiblevcn",
- "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
- "lifecycle_state": "AVAILABLE",
- "time_created": "2017-11-13T20:22:40.626000+00:00",
- "vcn_domain_name": "ansiblevcn.oraclevcn.com"
- }
+ description: Information about the VCN.
+ returned: On successful create and update operation
+ type: dict
+ sample:
+ {
+ "cidr_block": "10.0.0.0/16",
+      "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -165,17 +162,12 @@ def main():
)
module_args.update(
dict(
- cidr_block=dict(type="str", required=False),
- compartment_id=dict(type="str", required=False),
- display_name=dict(type="str", required=False, aliases=["name"]),
- dns_label=dict(type="str", required=False),
- state=dict(
- type="str",
- required=False,
- default="present",
- choices=["absent", "present"],
- ),
- vcn_id=dict(type="str", required=False, aliases=["id"]),
+ cidr_block=dict(type="str"),
+ compartment_id=dict(type="str"),
+ display_name=dict(type="str", aliases=["name"]),
+ dns_label=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ vcn_id=dict(type="str", aliases=["id"]),
)
)
diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py
index bc2e89656d..41b5df4f08 100644
--- a/plugins/modules/odbc.py
+++ b/plugins/modules/odbc.py
@@ -8,55 +8,54 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: odbc
author: "John Westcott IV (@john-westcott-iv)"
version_added: "1.0.0"
-short_description: Execute SQL via ODBC
+short_description: Execute SQL using ODBC
description:
- - Read/Write info via ODBC drivers.
+ - Read/Write info using ODBC drivers.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- dsn:
- description:
- - The connection string passed into ODBC.
- required: true
- type: str
- query:
- description:
- - The SQL query to perform.
- required: true
- type: str
- params:
- description:
- - Parameters to pass to the SQL query.
- type: list
- elements: str
- commit:
- description:
- - Perform a commit after the execution of the SQL query.
- - Some databases allow a commit after a select whereas others raise an exception.
- - Default is V(true) to support legacy module behavior.
- type: bool
- default: true
- version_added: 1.3.0
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: true
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: true
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is V(true) to support legacy module behavior.
+ type: bool
+ default: true
+ version_added: 1.3.0
requirements:
- "pyodbc"
notes:
- - "Like the command module, this module always returns changed = yes whether or not the query would change the database."
- - "To alter this behavior you can use C(changed_when): [yes or no]."
- - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
-'''
+ - Like the command module, this module always returns V(changed=true) whether or not the query would change the database.
+ - 'To alter this behavior you can use C(changed_when): [true or false].'
+ - For details about return values (RV(description) and RV(row_count)) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor).
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Set some values in the test db
community.general.odbc:
dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
@@ -65,24 +64,25 @@ EXAMPLES = '''
- "value1"
commit: false
changed_when: false
-'''
+"""
-RETURN = '''
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
+RETURN = r"""
results:
- description: List of lists of strings containing selected rows, likely empty for DDL statements.
- returned: success
- type: list
- elements: list
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
description:
- description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
- returned: success
- type: list
- elements: dict
+ description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
row_count:
- description: "The number of rows selected or modified according to the cursor defaults to -1. See notes."
- returned: success
- type: str
-'''
+ description: "The number of rows selected or modified according to the cursor defaults to V(-1). See notes."
+ returned: success
+ type: str
+"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py
index ed8ebd188b..6b8384a7ca 100644
--- a/plugins/modules/office_365_connector_card.py
+++ b/plugins/modules/office_365_connector_card.py
@@ -8,17 +8,16 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: office_365_connector_card
short_description: Use webhooks to create Connector Card messages within an Office 365 group
description:
- - Creates Connector Card messages through
- Office 365 Connectors
+ - Creates Connector Card messages through Office 365 Connectors.
+ - See
U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
author: "Marc Sensenich (@marc-sensenich)"
notes:
- - This module is not idempotent, therefore if the same task is run twice
- there will be two Connector Cards created
+ - This module is not idempotent, therefore if you run the same task twice then you create two Connector Cards.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -36,8 +35,8 @@ options:
type: str
description:
- A string used for summarizing card content.
- - This will be shown as the message subject.
- - This is required if the text parameter isn't populated.
+ - This is the message subject.
+ - This is required if the text parameter is not populated.
color:
type: str
description:
@@ -50,23 +49,22 @@ options:
type: str
description:
- The main text of the card.
- - This will be rendered below the sender information and optional title,
- - and above any sections or actions present.
+ - This is rendered below the sender information and optional title,
+ - And above any sections or actions present.
actions:
type: list
elements: dict
description:
- - This array of objects will power the action links
- - found at the bottom of the card.
+ - This array of objects is used to power the action links found at the bottom of the card.
sections:
type: list
elements: dict
description:
- Contains a list of sections to display in the card.
- For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields).
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create a simple Connector Card
community.general.office_365_connector_card:
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
@@ -77,71 +75,70 @@ EXAMPLES = """
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
summary: This is the summary property
title: This is the **card's title** property
- text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
- adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
+ tempor incididunt ut labore et dolore magna aliqua.
color: E81123
sections:
- - title: This is the **section's title** property
- activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
- activity_title: This is the section's **activityTitle** property
- activity_subtitle: This is the section's **activitySubtitle** property
- activity_text: This is the section's **activityText** property.
- hero_image:
- image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
- title: This is the image's alternate text
- text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
- adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
- facts:
- - name: This is a fact name
- value: This is a fact value
- - name: This is a fact name
- value: This is a fact value
- - name: This is a fact name
- value: This is a fact value
- images:
- - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
- title: This is the image's alternate text
- - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
- title: This is the image's alternate text
- - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
- title: This is the image's alternate text
- actions:
- - "@type": ActionCard
- name: Comment
- inputs:
- - "@type": TextInput
- id: comment
- is_multiline: true
- title: Input's title property
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
+ tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
actions:
- - "@type": HttpPOST
- name: Save
- target: http://...
- - "@type": ActionCard
- name: Due Date
- inputs:
- - "@type": DateInput
- id: dueDate
- title: Input's title property
- actions:
- - "@type": HttpPOST
- name: Save
- target: http://...
- - "@type": HttpPOST
- name: Action's name prop.
- target: http://...
- - "@type": OpenUri
- name: Action's name prop
- targets:
- - os: default
- uri: http://...
- - start_group: true
- title: This is the title of a **second section**
- text: This second section is visually separated from the first one by setting its
- **startGroup** property to true.
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its **startGroup** property to true.
"""
-RETURN = """
+RETURN = r"""
"""
# import module snippets
diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py
index 7fdab3bb75..32b14b2e81 100644
--- a/plugins/modules/ohai.py
+++ b/plugins/modules/ohai.py
@@ -9,15 +9,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
- - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
- (U(https://docs.chef.io/ohai.html)) on the remote host and
- returns JSON inventory data.
- I(Ohai) data is a bit more verbose and nested than I(facter).
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program (U(https://docs.chef.io/ohai.html))
+ on the remote host and returns JSON inventory data. I(Ohai) data is a bit more verbose and nested than I(facter).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -27,16 +24,16 @@ attributes:
support: none
options: {}
notes: []
-requirements: [ "ohai" ]
+requirements: ["ohai"]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
-'''
+"""
-EXAMPLES = '''
-# Retrieve (ohai) data from all Web servers and store in one-file per host
+EXAMPLES = r"""
ansible webservers -m ohai --tree=/tmp/ohaidata
-'''
+...
+"""
import json
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/omapi_host.py b/plugins/modules/omapi_host.py
index c93c578535..36c5434fd5 100644
--- a/plugins/modules/omapi_host.py
+++ b/plugins/modules/omapi_host.py
@@ -10,11 +10,10 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: omapi_host
short_description: Setup OMAPI hosts
-description: Manage OMAPI hosts into compatible DHCPd servers
+description: Manage OMAPI hosts into compatible DHCPd servers.
requirements:
- pypureomapi
author:
@@ -22,65 +21,64 @@ author:
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Create or remove OMAPI host.
- type: str
- required: true
- choices: [ absent, present ]
- hostname:
- description:
- - Sets the host lease hostname (mandatory if state=present).
- type: str
- aliases: [ name ]
- host:
- description:
- - Sets OMAPI server host to interact with.
- type: str
- default: localhost
- port:
- description:
- - Sets the OMAPI server port to interact with.
- type: int
- default: 7911
- key_name:
- description:
- - Sets the TSIG key name for authenticating against OMAPI server.
- type: str
- required: true
- key:
- description:
- - Sets the TSIG key content for authenticating against OMAPI server.
- type: str
- required: true
- macaddr:
- description:
- - Sets the lease host MAC address.
- type: str
- required: true
- ip:
- description:
- - Sets the lease host IP address.
- type: str
- statements:
- description:
- - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
- type: list
- elements: str
- default: []
- ddns:
- description:
- - Enable dynamic DNS updates for this host.
- type: bool
- default: false
-
-'''
-EXAMPLES = r'''
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [absent, present]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if O(state=present)).
+ type: str
+ aliases: [name]
+ host:
+ description:
+ - Sets OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+ - Sets the TSIG key name for authenticating against OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+ - Sets the TSIG key content for authenticating against OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+ - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
+ type: list
+ elements: str
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: false
+"""
+EXAMPLES = r"""
- name: Add a host using OMAPI
community.general.omapi_host:
key_name: defomapi
@@ -91,8 +89,8 @@ EXAMPLES = r'''
ip: 192.168.88.99
ddns: true
statements:
- - filename "pxelinux.0"
- - next-server 1.1.1.1
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
state: present
- name: Remove a host using OMAPI
@@ -102,35 +100,35 @@ EXAMPLES = r'''
host: 10.1.1.1
macaddr: 00:66:ab:dd:11:44
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
lease:
- description: dictionary containing host information
- returned: success
- type: complex
- contains:
- ip-address:
- description: IP address, if there is.
- returned: success
- type: str
- sample: '192.168.1.5'
- hardware-address:
- description: MAC address
- returned: success
- type: str
- sample: '00:11:22:33:44:55'
- hardware-type:
- description: hardware type, generally '1'
- returned: success
- type: int
- sample: 1
- name:
- description: hostname
- returned: success
- type: str
- sample: 'mydesktop'
-'''
+ description: Dictionary containing host information.
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+ description: IP address, if there is.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address.
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: Hardware type, generally V(1).
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: Hostname.
+ returned: success
+ type: str
+ sample: 'mydesktop'
+"""
import binascii
import socket
diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py
index 6188f3d0f7..7c43da3058 100644
--- a/plugins/modules/one_host.py
+++ b/plugins/modules/one_host.py
@@ -10,87 +10,86 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_host
short_description: Manages OpenNebula Hosts
requirements:
- - pyone
+ - pyone
description:
- - "Manages OpenNebula Hosts"
-
+ - Manages OpenNebula Hosts.
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Hostname of the machine to manage.
- required: true
- type: str
- state:
- description:
- - Takes the host to the desired lifecycle state.
- - If V(absent) the host will be deleted from the cluster.
- - If V(present) the host will be created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
- - If V(enabled) the host is fully operational.
- - V(disabled), e.g. to perform maintenance operations.
- - V(offline), host is totally offline.
- choices:
- - absent
- - present
- - enabled
- - disabled
- - offline
- default: present
- type: str
- im_mad_name:
- description:
- - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name)
- default: kvm
- type: str
- vmm_mad_name:
- description:
- - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD (name)
- default: kvm
- type: str
- cluster_id:
- description:
- - The cluster ID.
- default: 0
- type: int
- cluster_name:
- description:
- - The cluster specified by name.
- type: str
- labels:
- description:
- - The labels for this host.
- type: list
- elements: str
- template:
- description:
- - The template or attribute changes to merge into the host template.
- aliases:
- - attributes
- type: dict
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If V(absent) the host is deleted from the cluster.
+ - If V(present) the host is created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
+ - If V(enabled) the host is fully operational.
+ - V(disabled), for example to perform maintenance operations.
+ - V(offline), host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+ - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name).
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+ - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD
+ (name).
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ elements: str
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
extends_documentation_fragment:
- - community.general.opennebula
- - community.general.attributes
+ - community.general.opennebula
+ - community.general.attributes
author:
- - Rafael del Valle (@rvalle)
-'''
+ - Rafael del Valle (@rvalle)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new host in OpenNebula
community.general.one_host:
name: host1
@@ -102,15 +101,15 @@ EXAMPLES = '''
name: host2
cluster_name: default
template:
- LABELS:
- - gold
- - ssd
- RESERVED_CPU: -100
-'''
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
+"""
# TODO: pending setting guidelines on returned values
-RETURN = '''
-'''
+RETURN = r"""
+"""
# TODO: Documentation on valid state transitions is required to properly implement all valid cases
# TODO: To be coherent with CLI this module should also provide "flush" functionality
diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py
index 5877142cdf..d9a21f86b7 100644
--- a/plugins/modules/one_image.py
+++ b/plugins/modules/one_image.py
@@ -8,12 +8,11 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_image
short_description: Manages OpenNebula images
description:
- - Manages OpenNebula images
+ - Manages OpenNebula images.
requirements:
- pyone
extends_documentation_fragment:
@@ -32,13 +31,14 @@ options:
name:
description:
- A O(name) of the image you would like to manage.
+ - Required if O(create=true).
type: str
state:
description:
- - V(present) - state that is used to manage the image
- - V(absent) - delete the image
- - V(cloned) - clone the image
- - V(renamed) - rename the image to the O(new_name)
+ - V(present) - state that is used to manage the image.
+ - V(absent) - delete the image.
+ - V(cloned) - clone the image.
+ - V(renamed) - rename the image to the O(new_name).
choices: ["present", "absent", "cloned", "renamed"]
default: present
type: str
@@ -48,19 +48,41 @@ options:
type: bool
new_name:
description:
- - A name that will be assigned to the existing or new image.
- - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ - A name that is assigned to the existing or new image.
+ - In the case of cloning, by default O(new_name) is set to the name of the origin image with the prefix 'Copy of'.
type: str
persistent:
description:
- Whether the image should be persistent or non-persistent.
type: bool
version_added: 9.5.0
+ create:
+ description:
+ - Whether the image should be created if not present.
+ - This is ignored if O(state=absent).
+ type: bool
+ version_added: 10.0.0
+ template:
+ description:
+ - Use with O(create=true) to specify image template.
+ type: str
+ version_added: 10.0.0
+ datastore_id:
+ description:
+ - Use with O(create=true) to specify datastore for image.
+ type: int
+ version_added: 10.0.0
+ wait_timeout:
+ description:
+ - Seconds to wait until image is ready, deleted or cloned.
+ type: int
+ default: 60
+ version_added: 10.0.0
author:
- - "Milan Ilic (@ilicmilan)"
-'''
+ - "Milan Ilic (@ilicmilan)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Fetch the IMAGE by id
community.general.one_image:
id: 45
@@ -102,221 +124,250 @@ EXAMPLES = '''
community.general.one_image:
id: '{{ result.id }}'
state: absent
-'''
-RETURN = '''
+- name: Make sure IMAGE is present
+ community.general.one_image:
+ name: myyy-image
+ state: present
+ create: true
+ datastore_id: 100
+ template: |
+ PATH = "/var/tmp/image"
+ TYPE = "OS"
+ SIZE = 20512
+ FORMAT = "qcow2"
+ PERSISTENT = "Yes"
+ DEV_PREFIX = "vd"
+
+- name: Make sure IMAGE is present with a longer timeout
+ community.general.one_image:
+ name: big-image
+ state: present
+ create: true
+ datastore_id: 100
+ wait_timeout: 900
+ template: |-
+ PATH = "https://192.0.2.200/repo/tipa_image.raw"
+ TYPE = "OS"
+ SIZE = 82048
+ FORMAT = "raw"
+ PERSISTENT = "Yes"
+ DEV_PREFIX = "vd"
+"""
+
+RETURN = r"""
id:
- description: image id
- type: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: 153
+ description: Image ID.
+ type: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: 153
name:
- description: image name
- type: str
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: app1
+ description: Image name.
+ type: str
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: app1
group_id:
- description: image's group id
- type: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: 1
+ description: Image's group ID.
+ type: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: 1
group_name:
- description: image's group name
- type: str
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: one-users
+ description: Image's group name.
+ type: str
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: one-users
owner_id:
- description: image's owner id
- type: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: 143
+ description: Image's owner ID.
+ type: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: 143
owner_name:
- description: image's owner name
- type: str
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: ansible-test
+ description: Image's owner name.
+ type: str
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: ansible-test
state:
- description: state of image instance
- type: str
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: READY
+ description: State of image instance.
+ type: str
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: READY
used:
- description: is image in use
- type: bool
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: true
+ description: Is image in use.
+ type: bool
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: true
running_vms:
- description: count of running vms that use this image
- type: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample: 7
+ description: Count of running vms that use this image.
+ type: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample: 7
permissions:
- description: The image's permissions.
- type: dict
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
- contains:
- owner_u:
- description: The image's owner USAGE permissions.
- type: str
- sample: 1
- owner_m:
- description: The image's owner MANAGE permissions.
- type: str
- sample: 0
- owner_a:
- description: The image's owner ADMIN permissions.
- type: str
- sample: 0
- group_u:
- description: The image's group USAGE permissions.
- type: str
- sample: 0
- group_m:
- description: The image's group MANAGE permissions.
- type: str
- sample: 0
- group_a:
- description: The image's group ADMIN permissions.
- type: str
- sample: 0
- other_u:
- description: The image's other users USAGE permissions.
- type: str
- sample: 0
- other_m:
- description: The image's other users MANAGE permissions.
- type: str
- sample: 0
- other_a:
- description: The image's other users ADMIN permissions
- type: str
- sample: 0
- sample:
- owner_u: 1
- owner_m: 0
- owner_a: 0
- group_u: 0
- group_m: 0
- group_a: 0
- other_u: 0
- other_m: 0
- other_a: 0
+ description: The image's permissions.
+ type: dict
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
+ contains:
+ owner_u:
+ description: The image's owner USAGE permissions.
+ type: str
+ sample: 1
+ owner_m:
+ description: The image's owner MANAGE permissions.
+ type: str
+ sample: 0
+ owner_a:
+ description: The image's owner ADMIN permissions.
+ type: str
+ sample: 0
+ group_u:
+ description: The image's group USAGE permissions.
+ type: str
+ sample: 0
+ group_m:
+ description: The image's group MANAGE permissions.
+ type: str
+ sample: 0
+ group_a:
+ description: The image's group ADMIN permissions.
+ type: str
+ sample: 0
+ other_u:
+ description: The image's other users USAGE permissions.
+ type: str
+ sample: 0
+ other_m:
+ description: The image's other users MANAGE permissions.
+ type: str
+ sample: 0
+ other_a:
+ description: The image's other users ADMIN permissions.
+ type: str
+ sample: 0
+ sample:
+ owner_u: 1
+ owner_m: 0
+ owner_a: 0
+ group_u: 0
+ group_m: 0
+ group_a: 0
+ other_u: 0
+ other_m: 0
+ other_a: 0
type:
- description: The image's type.
- type: str
- sample: 0
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's type.
+ type: str
+ sample: 0
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
disk_type:
- description: The image's format type.
- type: str
- sample: 0
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's format type.
+ type: str
+ sample: 0
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
persistent:
- description: The image's persistence status (1 means true, 0 means false).
- type: int
- sample: 1
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's persistence status (1 means true, 0 means false).
+ type: int
+ sample: 1
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
source:
- description: The image's source.
- type: str
- sample: /var/lib/one//datastores/100/somerandomstringxd
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ description: The image's source.
+ type: str
+ sample: /var/lib/one//datastores/100/somerandomstringxd
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
path:
- description: The image's filesystem path.
- type: str
- sample: /var/tmp/hello.qcow2
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's filesystem path.
+ type: str
+ sample: /var/tmp/hello.qcow2
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
fstype:
- description: The image's filesystem type.
- type: str
- sample: ext4
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's filesystem type.
+ type: str
+ sample: ext4
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
size:
- description: The image's size in MegaBytes.
- type: int
- sample: 10000
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's size in MegaBytes.
+ type: int
+ sample: 10000
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
cloning_ops:
- description: The image's cloning operations per second.
- type: int
- sample: 0
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's cloning operations per second.
+ type: int
+ sample: 0
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
cloning_id:
- description: The image's cloning ID.
- type: int
- sample: -1
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's cloning ID.
+ type: int
+ sample: -1
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
target_snapshot:
- description: The image's target snapshot.
- type: int
- sample: 1
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's target snapshot.
+ type: int
+ sample: 1
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
datastore_id:
- description: The image's datastore ID.
- type: int
- sample: 100
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's datastore ID.
+ type: int
+ sample: 100
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
datastore:
- description: The image's datastore name.
- type: int
- sample: image_datastore
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
+ description: The image's datastore name.
+ type: int
+ sample: image_datastore
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
vms:
- description: The image's list of vm ID's.
- type: list
- elements: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample:
- - 1
- - 2
- - 3
- version_added: 9.5.0
+ description: The image's list of VM ID's.
+ type: list
+ elements: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample:
+ - 1
+ - 2
+ - 3
+ version_added: 9.5.0
clones:
- description: The image's list of clones ID's.
- type: list
- elements: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample:
- - 1
- - 2
- - 3
- version_added: 9.5.0
+ description: The image's list of clones ID's.
+ type: list
+ elements: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample:
+ - 1
+ - 2
+ - 3
+ version_added: 9.5.0
app_clones:
- description: The image's list of app_clones ID's.
- type: list
- elements: int
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- sample:
- - 1
- - 2
- - 3
- version_added: 9.5.0
+ description: The image's list of app_clones ID's.
+ type: list
+ elements: int
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ sample:
+ - 1
+ - 2
+ - 3
+ version_added: 9.5.0
snapshots:
- description: The image's list of snapshots.
- type: list
- returned: when O(state=present), O(state=cloned), or O(state=renamed)
- version_added: 9.5.0
- sample:
- - date: 123123
- parent: 1
- size: 10228
- allow_orphans: 1
- children: 0
- active: 1
- name: SampleName
-'''
+ description: The image's list of snapshots.
+ type: list
+ returned: when O(state=present), O(state=cloned), or O(state=renamed)
+ version_added: 9.5.0
+ sample:
+ - date: 123123
+ parent: 1
+ size: 10228
+ allow_orphans: 1
+ children: 0
+ active: 1
+ name: SampleName
+"""
from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
@@ -328,15 +379,20 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE',
class ImageModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'),
- enabled=dict(type='bool', required=False),
- new_name=dict(type='str', required=False),
- persistent=dict(type='bool', required=False),
+ enabled=dict(type='bool'),
+ new_name=dict(type='str'),
+ persistent=dict(type='bool'),
+ create=dict(type='bool'),
+ template=dict(type='str'),
+ datastore_id=dict(type='int'),
+ wait_timeout=dict(type='int', default=60),
)
required_if = [
- ['state', 'renamed', ['id']]
+ ['state', 'renamed', ['id']],
+ ['create', True, ['template', 'datastore_id', 'name']],
]
mutually_exclusive = [
['id', 'name'],
@@ -356,26 +412,32 @@ class ImageModule(OpenNebulaModule):
enabled = params.get('enabled')
new_name = params.get('new_name')
persistent = params.get('persistent')
+ create = params.get('create')
+ template = params.get('template')
+ datastore_id = params.get('datastore_id')
+ wait_timeout = params.get('wait_timeout')
self.result = {}
image = self.get_image_instance(id, name)
if not image and desired_state != 'absent':
+ if create:
+ self.result = self.create_image(name, template, datastore_id, wait_timeout)
# Using 'if id:' doesn't work properly when id=0
- if id is not None:
+ elif id is not None:
module.fail_json(msg="There is no image with id=" + str(id))
elif name is not None:
module.fail_json(msg="There is no image with name=" + name)
if desired_state == 'absent':
- self.result = self.delete_image(image)
+ self.result = self.delete_image(image, wait_timeout)
else:
if persistent is not None:
self.result = self.change_persistence(image, persistent)
if enabled is not None:
self.result = self.enable_image(image, enabled)
if desired_state == "cloned":
- self.result = self.clone_image(image, new_name)
+ self.result = self.clone_image(image, new_name, wait_timeout)
elif desired_state == "renamed":
self.result = self.rename_image(image, new_name)
@@ -404,6 +466,16 @@ class ImageModule(OpenNebulaModule):
else:
return self.get_image_by_name(requested_name)
+ def create_image(self, image_name, template, datastore_id, wait_timeout):
+ if not self.module.check_mode:
+ image_id = self.one.image.allocate("NAME = \"" + image_name + "\"\n" + template, datastore_id)
+ self.wait_for_ready(image_id, wait_timeout)
+ image = self.get_image_by_id(image_id)
+ result = self.get_image_info(image)
+
+ result['changed'] = True
+ return result
+
def wait_for_ready(self, image_id, wait_timeout=60):
import time
start_time = time.time()
@@ -491,7 +563,7 @@ class ImageModule(OpenNebulaModule):
return result
- def clone_image(self, image, new_name):
+ def clone_image(self, image, new_name, wait_timeout):
if new_name is None:
new_name = "Copy of " + image.NAME
@@ -506,7 +578,7 @@ class ImageModule(OpenNebulaModule):
if not self.module.check_mode:
new_id = self.one.image.clone(image.ID, new_name)
- self.wait_for_ready(new_id)
+ self.wait_for_ready(new_id, wait_timeout)
image = self.one.image.info(new_id)
result = self.get_image_info(image)
@@ -534,7 +606,7 @@ class ImageModule(OpenNebulaModule):
result['changed'] = True
return result
- def delete_image(self, image):
+ def delete_image(self, image, wait_timeout):
if not image:
return {'changed': False}
@@ -543,7 +615,7 @@ class ImageModule(OpenNebulaModule):
if not self.module.check_mode:
self.one.image.delete(image.ID)
- self.wait_for_delete(image.ID)
+ self.wait_for_delete(image.ID, wait_timeout)
return {'changed': True}
diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py
index 4bc48dfda1..f940444cad 100644
--- a/plugins/modules/one_image_info.py
+++ b/plugins/modules/one_image_info.py
@@ -8,8 +8,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_image_info
short_description: Gather information on OpenNebula images
description:
@@ -23,25 +22,25 @@ extends_documentation_fragment:
options:
ids:
description:
- - A list of images ids whose facts you want to gather.
+ - A list of images IDs whose facts you want to gather.
- Module can use integers too.
aliases: ['id']
type: list
elements: str
name:
description:
- - A O(name) of the image whose facts will be gathered.
- - If the O(name) begins with V(~) the O(name) will be used as regex pattern
- - which restricts the list of images (whose facts will be returned) whose names match specified regex.
- - Also, if the O(name) begins with V(~*) case-insensitive matching will be performed.
+ - A O(name) of the image whose facts is gathered.
+ - If the O(name) begins with V(~) the O(name) is used as regex pattern, which restricts the list of images (whose facts
+ is returned) whose names match specified regex.
+ - Also, if the O(name) begins with V(~*) case-insensitive matching is performed.
- See examples for more details.
type: str
author:
- - "Milan Ilic (@ilicmilan)"
- - "Jan Meerkamp (@meerkampdvv)"
-'''
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather facts about all images
community.general.one_image_info:
register: result
@@ -76,201 +75,201 @@ EXAMPLES = '''
community.general.one_image_info:
name: '~*foo-image-.*'
register: foo_images
-'''
+"""
-RETURN = '''
+RETURN = r"""
images:
- description: A list of images info
- type: complex
- returned: success
- contains:
- id:
- description: The image's id.
- type: int
- sample: 153
- name:
- description: The image's name.
- type: str
- sample: app1
- group_id:
- description: The image's group id
- type: int
- sample: 1
- group_name:
- description: The image's group name.
- type: str
- sample: one-users
- owner_id:
- description: The image's owner id.
- type: int
- sample: 143
- owner_name:
- description: The image's owner name.
- type: str
- sample: ansible-test
- state:
- description: The image's state.
- type: str
- sample: READY
- used:
- description: The image's usage status.
- type: bool
- sample: true
- running_vms:
- description: The image's count of running vms that use this image.
- type: int
- sample: 7
- permissions:
- description: The image's permissions.
- type: dict
- version_added: 9.5.0
- contains:
- owner_u:
- description: The image's owner USAGE permissions.
- type: str
- sample: 1
- owner_m:
- description: The image's owner MANAGE permissions.
- type: str
- sample: 0
- owner_a:
- description: The image's owner ADMIN permissions.
- type: str
- sample: 0
- group_u:
- description: The image's group USAGE permissions.
- type: str
- sample: 0
- group_m:
- description: The image's group MANAGE permissions.
- type: str
- sample: 0
- group_a:
- description: The image's group ADMIN permissions.
- type: str
- sample: 0
- other_u:
- description: The image's other users USAGE permissions.
- type: str
- sample: 0
- other_m:
- description: The image's other users MANAGE permissions.
- type: str
- sample: 0
- other_a:
- description: The image's other users ADMIN permissions
- type: str
- sample: 0
- sample:
- owner_u: 1
- owner_m: 0
- owner_a: 0
- group_u: 0
- group_m: 0
- group_a: 0
- other_u: 0
- other_m: 0
- other_a: 0
- type:
- description: The image's type.
- type: int
- sample: 0
- version_added: 9.5.0
- disk_type:
- description: The image's format type.
- type: int
- sample: 0
- version_added: 9.5.0
- persistent:
- description: The image's persistence status (1 means true, 0 means false).
- type: int
- sample: 1
- version_added: 9.5.0
- source:
- description: The image's source.
- type: str
- sample: /var/lib/one//datastores/100/somerandomstringxd
- version_added: 9.5.0
- path:
- description: The image's filesystem path.
- type: str
- sample: /var/tmp/hello.qcow2
- version_added: 9.5.0
- fstype:
- description: The image's filesystem type.
- type: str
- sample: ext4
- version_added: 9.5.0
- size:
- description: The image's size in MegaBytes.
- type: int
- sample: 10000
- version_added: 9.5.0
- cloning_ops:
- description: The image's cloning operations per second.
- type: int
- sample: 0
- version_added: 9.5.0
- cloning_id:
- description: The image's cloning ID.
- type: int
- sample: -1
- version_added: 9.5.0
- target_snapshot:
- description: The image's target snapshot.
- type: int
- sample: 1
- version_added: 9.5.0
- datastore_id:
- description: The image's datastore ID.
- type: int
- sample: 100
- version_added: 9.5.0
- datastore:
- description: The image's datastore name.
- type: int
- sample: image_datastore
- version_added: 9.5.0
- vms:
- description: The image's list of vm ID's.
- type: list
- elements: int
- version_added: 9.5.0
- sample:
- - 1
- - 2
- - 3
- clones:
- description: The image's list of clones ID's.
- type: list
- elements: int
- version_added: 9.5.0
- sample:
- - 1
- - 2
- - 3
- app_clones:
- description: The image's list of app_clones ID's.
- type: list
- elements: int
- version_added: 9.5.0
- sample:
- - 1
- - 2
- - 3
- snapshots:
- description: The image's list of snapshots.
- type: list
- version_added: 9.5.0
- sample:
- - date: 123123
- parent: 1
- size: 10228
- allow_orphans: 1
- children: 0
- active: 1
- name: SampleName
-'''
+ description: A list of images info.
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: The image's ID.
+ type: int
+ sample: 153
+ name:
+ description: The image's name.
+ type: str
+ sample: app1
+ group_id:
+ description: The image's group ID.
+ type: int
+ sample: 1
+ group_name:
+ description: The image's group name.
+ type: str
+ sample: one-users
+ owner_id:
+ description: The image's owner ID.
+ type: int
+ sample: 143
+ owner_name:
+ description: The image's owner name.
+ type: str
+ sample: ansible-test
+ state:
+ description: The image's state.
+ type: str
+ sample: READY
+ used:
+ description: The image's usage status.
+ type: bool
+ sample: true
+ running_vms:
+ description: The image's count of running vms that use this image.
+ type: int
+ sample: 7
+ permissions:
+ description: The image's permissions.
+ type: dict
+ version_added: 9.5.0
+ contains:
+ owner_u:
+ description: The image's owner USAGE permissions.
+ type: str
+ sample: 1
+ owner_m:
+ description: The image's owner MANAGE permissions.
+ type: str
+ sample: 0
+ owner_a:
+ description: The image's owner ADMIN permissions.
+ type: str
+ sample: 0
+ group_u:
+ description: The image's group USAGE permissions.
+ type: str
+ sample: 0
+ group_m:
+ description: The image's group MANAGE permissions.
+ type: str
+ sample: 0
+ group_a:
+ description: The image's group ADMIN permissions.
+ type: str
+ sample: 0
+ other_u:
+ description: The image's other users USAGE permissions.
+ type: str
+ sample: 0
+ other_m:
+ description: The image's other users MANAGE permissions.
+ type: str
+ sample: 0
+ other_a:
+ description: The image's other users ADMIN permissions.
+ type: str
+ sample: 0
+ sample:
+ owner_u: 1
+ owner_m: 0
+ owner_a: 0
+ group_u: 0
+ group_m: 0
+ group_a: 0
+ other_u: 0
+ other_m: 0
+ other_a: 0
+ type:
+ description: The image's type.
+ type: int
+ sample: 0
+ version_added: 9.5.0
+ disk_type:
+ description: The image's format type.
+ type: int
+ sample: 0
+ version_added: 9.5.0
+ persistent:
+ description: The image's persistence status (1 means true, 0 means false).
+ type: int
+ sample: 1
+ version_added: 9.5.0
+ source:
+ description: The image's source.
+ type: str
+ sample: /var/lib/one//datastores/100/somerandomstringxd
+ version_added: 9.5.0
+ path:
+ description: The image's filesystem path.
+ type: str
+ sample: /var/tmp/hello.qcow2
+ version_added: 9.5.0
+ fstype:
+ description: The image's filesystem type.
+ type: str
+ sample: ext4
+ version_added: 9.5.0
+ size:
+ description: The image's size in MegaBytes.
+ type: int
+ sample: 10000
+ version_added: 9.5.0
+ cloning_ops:
+ description: The image's cloning operations per second.
+ type: int
+ sample: 0
+ version_added: 9.5.0
+ cloning_id:
+ description: The image's cloning ID.
+ type: int
+ sample: -1
+ version_added: 9.5.0
+ target_snapshot:
+ description: The image's target snapshot.
+ type: int
+ sample: 1
+ version_added: 9.5.0
+ datastore_id:
+ description: The image's datastore ID.
+ type: int
+ sample: 100
+ version_added: 9.5.0
+ datastore:
+ description: The image's datastore name.
+ type: int
+ sample: image_datastore
+ version_added: 9.5.0
+ vms:
+ description: The image's list of VM ID's.
+ type: list
+ elements: int
+ version_added: 9.5.0
+ sample:
+ - 1
+ - 2
+ - 3
+ clones:
+ description: The image's list of clones ID's.
+ type: list
+ elements: int
+ version_added: 9.5.0
+ sample:
+ - 1
+ - 2
+ - 3
+ app_clones:
+ description: The image's list of app_clones ID's.
+ type: list
+ elements: int
+ version_added: 9.5.0
+ sample:
+ - 1
+ - 2
+ - 3
+ snapshots:
+ description: The image's list of snapshots.
+ type: list
+ version_added: 9.5.0
+ sample:
+ - date: 123123
+ parent: 1
+ size: 10228
+ allow_orphans: 1
+ children: 0
+ active: 1
+ name: SampleName
+"""
from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
@@ -282,8 +281,8 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE',
class ImageInfoModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- ids=dict(type='list', aliases=['id'], elements='str', required=False),
- name=dict(type='str', required=False),
+ ids=dict(type='list', aliases=['id'], elements='str'),
+ name=dict(type='str'),
)
mutually_exclusive = [
['ids', 'name'],
diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py
index 25ead72c1d..88ccd29d74 100644
--- a/plugins/modules/one_service.py
+++ b/plugins/modules/one_service.py
@@ -8,12 +8,11 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_service
short_description: Deploy and manage OpenNebula services
description:
- - Manage OpenNebula services
+ - Manage OpenNebula services.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -30,11 +29,13 @@ options:
type: str
api_username:
description:
- - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME) environment variable is used.
+ - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME)
+ environment variable is used.
type: str
api_password:
description:
- - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD) environment variable is used.
+ - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD)
+ environment variable is used.
type: str
template_name:
description:
@@ -54,8 +55,8 @@ options:
type: str
unique:
description:
- - Setting O(unique=true) will make sure that there is only one service instance running with a name set with O(service_name) when
- instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below.
+ - Setting O(unique=true) ensures that there is only one service instance running with a name set with O(service_name)
+ when instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below.
type: bool
default: false
state:
@@ -67,15 +68,16 @@ options:
type: str
mode:
description:
- - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group and others.
+ - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage)
+ and nothing to group and others.
type: str
owner_id:
description:
- - ID of the user which will be set as the owner of the service.
+ - ID of the user which is set as the owner of the service.
type: int
group_id:
description:
- - ID of the group which will be set as the group of the service.
+ - ID of the group which is set as the group of the service.
type: int
wait:
description:
@@ -89,7 +91,7 @@ options:
type: int
custom_attrs:
description:
- - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ - Dictionary of key/value custom attributes which is used when instantiating a new service.
default: {}
type: dict
role:
@@ -106,10 +108,10 @@ options:
type: bool
default: false
author:
- - "Milan Ilic (@ilicmilan)"
-'''
+ - "Milan Ilic (@ilicmilan)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Instantiate a new service
community.general.one_service:
template_id: 90
@@ -178,57 +180,57 @@ EXAMPLES = '''
role: foo
cardinality: 7
wait: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
service_id:
- description: service id
- type: int
- returned: success
- sample: 153
+ description: Service ID.
+ type: int
+ returned: success
+ sample: 153
service_name:
- description: service name
- type: str
- returned: success
- sample: app1
+ description: Service name.
+ type: str
+ returned: success
+ sample: app1
group_id:
- description: service's group id
- type: int
- returned: success
- sample: 1
+ description: Service's group ID.
+ type: int
+ returned: success
+ sample: 1
group_name:
- description: service's group name
- type: str
- returned: success
- sample: one-users
+ description: Service's group name.
+ type: str
+ returned: success
+ sample: one-users
owner_id:
- description: service's owner id
- type: int
- returned: success
- sample: 143
+ description: Service's owner ID.
+ type: int
+ returned: success
+ sample: 143
owner_name:
- description: service's owner name
- type: str
- returned: success
- sample: ansible-test
+ description: Service's owner name.
+ type: str
+ returned: success
+ sample: ansible-test
state:
- description: state of service instance
- type: str
- returned: success
- sample: RUNNING
+ description: State of service instance.
+ type: str
+ returned: success
+ sample: RUNNING
mode:
- description: service's mode
- type: int
- returned: success
- sample: 660
+ description: Service's mode.
+ type: int
+ returned: success
+ sample: 660
roles:
- description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
- type: list
- returned: success
- sample:
- - {"cardinality": 1,"name": "foo","state": "RUNNING", "ids": [ 123, 456 ]}
- - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}
-'''
+ description: List of dictionaries of roles, each role is described by name, cardinality, state and nodes IDs.
+ type: list
+ returned: success
+ sample:
+ - {"cardinality": 1, "name": "foo", "state": "RUNNING", "ids": [123, 456]}
+ - {"cardinality": 2, "name": "bar", "state": "RUNNING", "ids": [452, 567, 746]}
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py
index 1fcc81c540..091c4c55a7 100644
--- a/plugins/modules/one_template.py
+++ b/plugins/modules/one_template.py
@@ -9,8 +9,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_template
short_description: Manages OpenNebula templates
@@ -21,27 +20,25 @@ requirements:
- pyone
description:
- - "Manages OpenNebula templates."
-
+ - Manages OpenNebula templates.
attributes:
check_mode:
support: partial
details:
- - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change.
+ - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually
+ change.
diff_mode:
support: none
options:
id:
description:
- - A O(id) of the template you would like to manage. If not set then a
- - new template will be created with the given O(name).
+ - A O(id) of the template you would like to manage. If not set then a new template is created with the given O(name).
type: int
name:
description:
- - A O(name) of the template you would like to manage. If a template with
- - the given name does not exist it will be created, otherwise it will be
- - managed by this module.
+ - A O(name) of the template you would like to manage. If a template with the given name does not exist it is created,
+ otherwise it is managed by this module.
type: str
template:
description:
@@ -54,6 +51,16 @@ options:
choices: ["present", "absent"]
default: present
type: str
+ filter:
+ description:
+ - V(user_primary_group) - Resources belonging to the user's primary group.
+ - V(user) - Resources belonging to the user.
+ - V(all) - All resources.
+ - V(user_groups) - Resources belonging to the user and any of his groups.
+ choices: [user_primary_group, user, all, user_groups]
+ default: user
+ type: str
+ version_added: 10.3.0
extends_documentation_fragment:
- community.general.opennebula
@@ -61,9 +68,9 @@ extends_documentation_fragment:
author:
- "Jyrki Gadinger (@nilsding)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Fetch the TEMPLATE by id
community.general.one_template:
id: 6459
@@ -110,44 +117,44 @@ EXAMPLES = '''
community.general.one_template:
id: 6459
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
id:
- description: template id
- type: int
- returned: when O(state=present)
- sample: 153
+ description: Template ID.
+ type: int
+ returned: when O(state=present)
+ sample: 153
name:
- description: template name
- type: str
- returned: when O(state=present)
- sample: app1
+ description: Template name.
+ type: str
+ returned: when O(state=present)
+ sample: app1
template:
- description: the parsed template
- type: dict
- returned: when O(state=present)
+ description: The parsed template.
+ type: dict
+ returned: when O(state=present)
group_id:
- description: template's group id
- type: int
- returned: when O(state=present)
- sample: 1
+ description: Template's group ID.
+ type: int
+ returned: when O(state=present)
+ sample: 1
group_name:
- description: template's group name
- type: str
- returned: when O(state=present)
- sample: one-users
+ description: Template's group name.
+ type: str
+ returned: when O(state=present)
+ sample: one-users
owner_id:
- description: template's owner id
- type: int
- returned: when O(state=present)
- sample: 143
+ description: Template's owner ID.
+ type: int
+ returned: when O(state=present)
+ sample: 143
owner_name:
- description: template's owner name
- type: str
- returned: when O(state=present)
- sample: ansible-test
-'''
+ description: Template's owner name.
+ type: str
+ returned: when O(state=present)
+ sample: ansible-test
+"""
from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
@@ -156,10 +163,11 @@ from ansible_collections.community.general.plugins.module_utils.opennebula impor
class TemplateModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
+ template=dict(type='str'),
+ filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'),
)
mutually_exclusive = [
@@ -185,10 +193,11 @@ class TemplateModule(OpenNebulaModule):
name = params.get('name')
desired_state = params.get('state')
template_data = params.get('template')
+ filter = params.get('filter')
self.result = {}
- template = self.get_template_instance(id, name)
+ template = self.get_template_instance(id, name, filter)
needs_creation = False
if not template and desired_state != 'absent':
if id:
@@ -200,16 +209,19 @@ class TemplateModule(OpenNebulaModule):
self.result = self.delete_template(template)
else:
if needs_creation:
- self.result = self.create_template(name, template_data)
+ self.result = self.create_template(name, template_data, filter)
else:
- self.result = self.update_template(template, template_data)
+ self.result = self.update_template(template, template_data, filter)
self.exit()
- def get_template(self, predicate):
- # -3 means "Resources belonging to the user"
+ def get_template(self, predicate, filter):
+ # filter was included, for discussions see:
+ # Issue: https://github.com/ansible-collections/community.general/issues/9278
+ # PR: https://github.com/ansible-collections/community.general/pull/9547
# the other two parameters are used for pagination, -1 for both essentially means "return all"
- pool = self.one.templatepool.info(-3, -1, -1)
+ filter_values = {'user_primary_group': -4, 'user': -3, 'all': -2, 'user_groups': -1}
+ pool = self.one.templatepool.info(filter_values[filter], -1, -1)
for template in pool.VMTEMPLATE:
if predicate(template):
@@ -217,17 +229,17 @@ class TemplateModule(OpenNebulaModule):
return None
- def get_template_by_id(self, template_id):
- return self.get_template(lambda template: (template.ID == template_id))
+ def get_template_by_id(self, template_id, filter):
+ return self.get_template(lambda template: (template.ID == template_id), filter)
- def get_template_by_name(self, name):
- return self.get_template(lambda template: (template.NAME == name))
+ def get_template_by_name(self, name, filter):
+ return self.get_template(lambda template: (template.NAME == name), filter)
- def get_template_instance(self, requested_id, requested_name):
+ def get_template_instance(self, requested_id, requested_name, filter):
if requested_id:
- return self.get_template_by_id(requested_id)
+ return self.get_template_by_id(requested_id, filter)
else:
- return self.get_template_by_name(requested_name)
+ return self.get_template_by_name(requested_name, filter)
def get_template_info(self, template):
info = {
@@ -242,21 +254,21 @@ class TemplateModule(OpenNebulaModule):
return info
- def create_template(self, name, template_data):
+ def create_template(self, name, template_data, filter):
if not self.module.check_mode:
self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data)
- result = self.get_template_info(self.get_template_by_name(name))
+ result = self.get_template_info(self.get_template_by_name(name, filter))
result['changed'] = True
return result
- def update_template(self, template, template_data):
+ def update_template(self, template, template_data, filter):
if not self.module.check_mode:
# 0 = replace the whole template
self.one.template.update(template.ID, template_data, 0)
- result = self.get_template_info(self.get_template_by_id(template.ID))
+ result = self.get_template_info(self.get_template_by_id(template.ID, filter))
if self.module.check_mode:
# Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here.
result['changed'] = True
diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py
index 2f4ee25354..3d23efa036 100644
--- a/plugins/modules/one_vm.py
+++ b/plugins/modules/one_vm.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Milan Ilic
# Copyright (c) 2019, Jan Meerkamp
+# Copyright (c) 2025, Tom Paine
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -9,12 +10,11 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_vm
short_description: Creates or terminates OpenNebula instances
description:
- - Manages OpenNebula instances
+ - Manages OpenNebula instances.
requirements:
- pyone
extends_documentation_fragment:
@@ -28,49 +28,47 @@ options:
api_url:
description:
- URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
- If not set then the value of the E(ONE_URL) environment variable is used.
type: str
api_username:
description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the E(ONE_USERNAME) environment variable is used.
+ - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+ variable is used.
type: str
api_password:
description:
- - Password of the user to login into OpenNebula RPC server. If not set
- then the value of the E(ONE_PASSWORD) environment variable is used.
- if both O(api_username) or O(api_password) are not set, then it will try
- authenticate with ONE auth file. Default path is "~/.one/one_auth".
+ - Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment
+        variable is used. If both O(api_username) and O(api_password) are not set, then it tries to authenticate with ONE auth
+ file. Default path is C(~/.one/one_auth).
- Set environment variable E(ONE_AUTH) to override this path.
type: str
template_name:
description:
- - Name of VM template to use to create a new instance
+ - Name of VM template to use to create a new instance.
type: str
template_id:
description:
- - ID of a VM template to use to create a new instance
+ - ID of a VM template to use to create a new instance.
type: int
vm_start_on_hold:
description:
- - Set to true to put vm on hold while creating
+ - Set to true to put VM on hold while creating.
default: false
type: bool
instance_ids:
description:
- - 'A list of instance ids used for states: V(absent), V(running), V(rebooted), V(poweredoff).'
+ - 'A list of instance IDs used for states: V(absent), V(running), V(rebooted), V(poweredoff).'
aliases: ['ids']
type: list
elements: int
state:
description:
- V(present) - create instances from a template specified with C(template_id)/C(template_name).
- - V(running) - run instances
- - V(poweredoff) - power-off instances
- - V(rebooted) - reboot instances
- - V(absent) - terminate instances
+ - V(running) - run instances.
+ - V(poweredoff) - power-off instances.
+ - V(rebooted) - reboot instances.
+ - V(absent) - terminate instances.
choices: ["present", "absent", "running", "rebooted", "poweredoff"]
default: present
type: str
@@ -81,96 +79,85 @@ options:
type: bool
wait:
description:
- - Wait for the instance to reach its desired state before returning. Keep
- in mind if you are waiting for instance to be in running state it
- doesn't mean that you will be able to SSH on that machine only that
- boot process have started on that instance, see 'wait_for' example for
- details.
+ - Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for instance to
+ be in running state it does not mean that you are able to SSH on that machine only that boot process have started
+ on that instance. See the example using the M(ansible.builtin.wait_for) module for details.
default: true
type: bool
wait_timeout:
description:
- - How long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
default: 300
type: int
attributes:
description:
- - A dictionary of key/value attributes to add to new instances, or for
- setting C(state) of instances with these attributes.
+ - A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes.
- Keys are case insensitive and OpenNebula automatically converts them to upper case.
- - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
- - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
- indexes to the names of VMs.
- - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
- - When used with O(count_attributes) and O(exact_count) the module will
- match the base name without the index part.
+ - Be aware V(NAME) is a special attribute which sets the name of the VM when it is deployed.
+ - C(#) character(s) can be appended to the C(NAME) and the module automatically adds indexes to the names of VMs.
+ - 'For example: V(NAME: foo-###) would create VMs with names V(foo-000), V(foo-001),...'
+ - When used with O(count_attributes) and O(exact_count) the module matches the base name without the index part.
default: {}
type: dict
labels:
description:
- - A list of labels to associate with new instances, or for setting
- C(state) of instances with these labels.
+ - A list of labels to associate with new instances, or for setting C(state) of instances with these labels.
default: []
type: list
elements: str
count_attributes:
description:
- - A dictionary of key/value attributes that can only be used with
- O(exact_count) to determine how many nodes based on a specific
- attributes criteria should be deployed. This can be expressed in
- multiple ways and is shown in the EXAMPLES section.
+ - A dictionary of key/value attributes that can only be used with O(exact_count) to determine how many nodes based on
+ a specific attributes criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES
+ section.
type: dict
count_labels:
description:
- - A list of labels that can only be used with O(exact_count) to determine
- how many nodes based on a specific labels criteria should be deployed.
- This can be expressed in multiple ways and is shown in the EXAMPLES
- section.
+ - A list of labels that can only be used with O(exact_count) to determine how many nodes based on a specific labels
+ criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES section.
type: list
elements: str
count:
description:
- - Number of instances to launch
+ - Number of instances to launch.
default: 1
type: int
exact_count:
description:
- - Indicates how many instances that match O(count_attributes) and
- O(count_labels) parameters should be deployed. Instances are either
- created or terminated based on this value.
- - 'B(NOTE:) Instances with the least IDs will be terminated first.'
+ - Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances
+ are either created or terminated based on this value.
+ - B(NOTE:) Instances with the least IDs are terminated first.
type: int
mode:
description:
- - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group and others.
+ - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing
+ to group and others.
type: str
owner_id:
description:
- - ID of the user which will be set as the owner of the instance
+ - ID of the user which is set as the owner of the instance.
type: int
group_id:
description:
- - ID of the group which will be set as the group of the instance
+ - ID of the group which is set as the group of the instance.
type: int
memory:
description:
- - The size of the memory for new instances (in MB, GB, ...)
+      - The size of the memory for new instances (in MB, GB, ...).
type: str
disk_size:
description:
- The size of the disk created for new instances (in MB, GB, TB,...).
- - 'B(NOTE:) If The Template hats Multiple Disks the Order of the Sizes is
- matched against the order specified in O(template_id)/O(template_name).'
+      - B(NOTE:) If the template has multiple disks, the order of the sizes is matched against the order specified in O(template_id)/O(template_name).
type: list
elements: str
cpu:
description:
- - Percentage of CPU divided by 100 required for the new instance. Half a
- processor is written 0.5.
+ - Percentage of CPU divided by 100 required for the new instance. Half a processor is written 0.5.
type: float
vcpu:
description:
- - Number of CPUs (cores) new VM will have.
+ - Number of CPUs (cores) the new VM uses.
type: int
networks:
description:
@@ -183,9 +170,9 @@ options:
- Creates an image from a VM disk.
- It is a dictionary where you have to specify C(name) of the new image.
- Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- - 'B(NOTE:) This operation will only be performed on the first VM (if more than one VM ID is passed)
- and the VM has to be in the C(poweredoff) state.'
- - Also this operation will fail if an image with specified C(name) already exists.
+ - B(NOTE:) This operation is only performed on the first VM (if more than one VM ID is passed) and the VM has to be
+ in the C(poweredoff) state.
+ - Also this operation fails if an image with specified C(name) already exists.
type: dict
persistent:
description:
@@ -195,28 +182,40 @@ options:
version_added: '0.2.0'
datastore_id:
description:
- - Name of Datastore to use to create a new instance
+ - Name of Datastore to use to create a new instance.
version_added: '0.2.0'
type: int
datastore_name:
description:
- - Name of Datastore to use to create a new instance
+ - Name of Datastore to use to create a new instance.
version_added: '0.2.0'
type: str
updateconf:
description:
- When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
- - When new VMs are being created, emulates the C(updateconf) API call via direct template merge.
+ - When new VMs are being created, emulates the C(updateconf) API call using direct template merge.
- Allows for complete modifications of the C(CONTEXT) attribute.
+ - 'Supported attributes include:'
+ - B(BACKUP_CONFIG:) V(BACKUP_VOLATILE), V(FS_FREEZE), V(INCREMENT_MODE), V(KEEP_LAST), V(MODE);
+ - B(CONTEXT:) (Any value, except V(ETH*). Variable substitutions are made);
+ - B(CPU_MODEL:) V(FEATURES), V(MODEL);
+ - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES),
+ V(VIRTIO_SCSI_QUEUES);
+ - B(GRAPHICS:) V(COMMAND), V(KEYMAP), V(LISTEN), V(PASSWD), V(PORT), V(TYPE);
+ - B(INPUT:) V(BUS), V(TYPE);
+ - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS),
+ V(UUID);
+ - B(RAW:) V(DATA), V(DATA_VMX), V(TYPE), V(VALIDATE);
+ - B(VIDEO:) V(ATS), V(IOMMU), V(RESOLUTION), V(TYPE), V(VRAM).
type: dict
version_added: 6.3.0
author:
- - "Milan Ilic (@ilicmilan)"
- - "Jan Meerkamp (@meerkampdvv)"
-'''
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new instance
community.general.one_vm:
template_id: 90
@@ -356,8 +355,9 @@ EXAMPLES = '''
register: vm
- name: Wait for SSH to come up
- ansible.builtin.wait_for_connection:
- delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+ ansible.builtin.wait_for:
+ port: 22
+ host: '{{ vm.instances[0].networks[0].ip }}'
- name: Terminate VMs by ids
community.general.one_vm:
@@ -441,241 +441,241 @@ EXAMPLES = '''
SSH_PUBLIC_KEY: |-
ssh-rsa ...
ssh-ed25519 ...
-'''
+"""
-RETURN = '''
+RETURN = r"""
instances_ids:
- description: a list of instances ids whose state is changed or which are fetched with O(instance_ids) option.
- type: list
- returned: success
- sample: [ 1234, 1235 ]
+ description: A list of instances IDs whose state is changed or which are fetched with O(instance_ids) option.
+ type: list
+ returned: success
+ sample: [1234, 1235]
instances:
- description: a list of instances info whose state is changed or which are fetched with O(instance_ids) option.
- type: complex
- returned: success
- contains:
- vm_id:
- description: vm id
- type: int
- sample: 153
- vm_name:
- description: vm name
- type: str
- sample: foo
- template_id:
- description: vm's template id
- type: int
- sample: 153
- group_id:
- description: vm's group id
- type: int
- sample: 1
- group_name:
- description: vm's group name
- type: str
- sample: one-users
- owner_id:
- description: vm's owner id
- type: int
- sample: 143
- owner_name:
- description: vm's owner name
- type: str
- sample: app-user
- mode:
- description: vm's mode
- type: str
- returned: success
- sample: 660
- state:
- description: state of an instance
- type: str
- sample: ACTIVE
- lcm_state:
- description: lcm state of an instance that is only relevant when the state is ACTIVE
- type: str
- sample: RUNNING
- cpu:
- description: Percentage of CPU divided by 100
- type: float
- sample: 0.2
- vcpu:
- description: Number of CPUs (cores)
- type: int
- sample: 2
- memory:
- description: The size of the memory in MB
- type: str
- sample: 4096 MB
- disk_size:
- description: The size of the disk in MB
- type: str
- sample: 20480 MB
- networks:
- description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
- type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
- uptime_h:
- description: Uptime of the instance in hours
- type: int
- sample: 35
- labels:
- description: A list of string labels that are associated with the instance
- type: list
- sample: [
- "foo",
- "spec-label"
- ]
- attributes:
- description: A dictionary of key/values attributes that are associated with the instance
- type: dict
- sample: {
- "HYPERVISOR": "kvm",
- "LOGO": "images/logos/centos.png",
- "TE_GALAXY": "bar",
- "USER_INPUTS": null
- }
- updateconf:
- description: A dictionary of key/values attributes that are set with the updateconf API call.
- type: dict
- version_added: 6.3.0
- sample: {
- "OS": { "ARCH": "x86_64" },
- "CONTEXT": {
- "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
- "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
- }
- }
+ description: A list of instances info whose state is changed or which are fetched with O(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: VM ID.
+ type: int
+ sample: 153
+ vm_name:
+ description: VM name.
+ type: str
+ sample: foo
+ template_id:
+ description: VM's template ID.
+ type: int
+ sample: 153
+ group_id:
+ description: VM's group ID.
+ type: int
+ sample: 1
+ group_name:
+ description: VM's group name.
+ type: str
+ sample: one-users
+ owner_id:
+ description: VM's owner ID.
+ type: int
+ sample: 143
+ owner_name:
+ description: VM's owner name.
+ type: str
+ sample: app-user
+ mode:
+ description: VM's mode.
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: State of an instance.
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: Lcm state of an instance that is only relevant when the state is ACTIVE.
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100.
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores).
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB.
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB.
+ type: str
+ sample: 20480 MB
+ networks:
+ description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC.
+ type: list
+ sample:
+ [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours.
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance.
+ type: list
+ sample: ["foo", "spec-label"]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance.
+ type: dict
+ sample:
+ {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+ updateconf:
+ description: A dictionary of key/values attributes that are set with the updateconf API call.
+ type: dict
+ version_added: 6.3.0
+ sample:
+ {
+ "OS": {
+ "ARCH": "x86_64"
+ },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
tagged_instances:
- description:
- - A list of instances info based on a specific attributes and/or
- - labels that are specified with O(count_attributes) and O(count_labels)
- - options.
- type: complex
- returned: success
- contains:
- vm_id:
- description: vm id
- type: int
- sample: 153
- vm_name:
- description: vm name
- type: str
- sample: foo
- template_id:
- description: vm's template id
- type: int
- sample: 153
- group_id:
- description: vm's group id
- type: int
- sample: 1
- group_name:
- description: vm's group name
- type: str
- sample: one-users
- owner_id:
- description: vm's user id
- type: int
- sample: 143
- owner_name:
- description: vm's user name
- type: str
- sample: app-user
- mode:
- description: vm's mode
- type: str
- returned: success
- sample: 660
- state:
- description: state of an instance
- type: str
- sample: ACTIVE
- lcm_state:
- description: lcm state of an instance that is only relevant when the state is ACTIVE
- type: str
- sample: RUNNING
- cpu:
- description: Percentage of CPU divided by 100
- type: float
- sample: 0.2
- vcpu:
- description: Number of CPUs (cores)
- type: int
- sample: 2
- memory:
- description: The size of the memory in MB
- type: str
- sample: 4096 MB
- disk_size:
- description: The size of the disk in MB
- type: list
- sample: [
- "20480 MB",
- "10240 MB"
- ]
- networks:
- description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
- type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
- uptime_h:
- description: Uptime of the instance in hours
- type: int
- sample: 35
- labels:
- description: A list of string labels that are associated with the instance
- type: list
- sample: [
- "foo",
- "spec-label"
- ]
- attributes:
- description: A dictionary of key/values attributes that are associated with the instance
- type: dict
- sample: {
- "HYPERVISOR": "kvm",
- "LOGO": "images/logos/centos.png",
- "TE_GALAXY": "bar",
- "USER_INPUTS": null
- }
- updateconf:
- description: A dictionary of key/values attributes that are set with the updateconf API call
- type: dict
- version_added: 6.3.0
- sample: {
- "OS": { "ARCH": "x86_64" },
- "CONTEXT": {
- "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
- "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
- }
- }
-'''
+ description:
 +    - A list of instances info based on specific attributes and/or labels that are specified with O(count_attributes) and
 +      O(count_labels) options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: VM ID.
+ type: int
+ sample: 153
+ vm_name:
+ description: VM name.
+ type: str
+ sample: foo
+ template_id:
+ description: VM's template ID.
+ type: int
+ sample: 153
+ group_id:
+ description: VM's group ID.
+ type: int
+ sample: 1
+ group_name:
+ description: VM's group name.
+ type: str
+ sample: one-users
+ owner_id:
+ description: VM's user ID.
+ type: int
+ sample: 143
+ owner_name:
+ description: VM's user name.
+ type: str
+ sample: app-user
+ mode:
+ description: VM's mode.
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: State of an instance.
+ type: str
+ sample: ACTIVE
+ lcm_state:
 +      description: LCM state of an instance that is only relevant when the state is ACTIVE.
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100.
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores).
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB.
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB.
+ type: list
+ sample: ["20480 MB", "10240 MB"]
+ networks:
+ description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC.
+ type: list
+ sample:
+ [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours.
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance.
+ type: list
+ sample: ["foo", "spec-label"]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance.
+ type: dict
+ sample:
+ {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+ updateconf:
+ description: A dictionary of key/values attributes that are set with the updateconf API call.
+ type: dict
+ version_added: 6.3.0
+ sample:
+ {
+ "OS": {
+ "ARCH": "x86_64"
+ },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
+"""
try:
import pyone
@@ -692,13 +692,17 @@ from ansible.module_utils.common.dict_transformations import dict_merge
from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
+# Updateconf attributes documentation: https://docs.opennebula.io/6.10/integration_and_development/system_interfaces/api.html#one-vm-updateconf
UPDATECONF_ATTRIBUTES = {
- "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID"],
- "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"],
+ "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID", "FIRMWARE"],
+ "CPU_MODEL": ["MODEL", "FEATURES"],
+ "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT", "VIRTIO_BLK_QUEUES", "VIRTIO_SCSI_QUEUES", "IOTHREADS"],
"INPUT": ["TYPE", "BUS"],
- "GRAPHICS": ["TYPE", "LISTEN", "PASSWD", "KEYMAP"],
- "RAW": ["DATA", "DATA_VMX", "TYPE"],
+ "GRAPHICS": ["TYPE", "LISTEN", "PORT", "PASSWD", "KEYMAP", "COMMAND"],
+ "VIDEO": ["ATS", "IOMMU", "RESOLUTION", "TYPE", "VRAM"],
+ "RAW": ["DATA", "DATA_VMX", "TYPE", "VALIDATE"],
"CONTEXT": [],
+ "BACKUP_CONFIG": ["FS_FREEZE", "KEEP_LAST", "BACKUP_VOLATILE", "MODE", "INCREMENT_MODE"],
}
diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py
index 2dcf20de5f..b77530e756 100644
--- a/plugins/modules/one_vnet.py
+++ b/plugins/modules/one_vnet.py
@@ -9,8 +9,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: one_vnet
short_description: Manages OpenNebula virtual networks
version_added: 9.4.0
@@ -23,20 +22,20 @@ attributes:
check_mode:
support: partial
details:
- - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually change.
+ - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually
+ change.
diff_mode:
support: none
options:
id:
description:
- A O(id) of the network you would like to manage.
- - If not set then a new network will be created with the given O(name).
+ - If not set then a new network is created with the given O(name).
type: int
name:
description:
- - A O(name) of the network you would like to manage. If a network with
- the given name does not exist it will be created, otherwise it will be
- managed by this module.
 +      - A O(name) of the network you would like to manage. If a network with the given name does not exist, then it is created,
 +        otherwise it is managed by this module.
type: str
template:
description:
@@ -53,9 +52,9 @@ options:
extends_documentation_fragment:
- community.general.opennebula
- community.general.attributes
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Make sure the network is present by ID
community.general.one_vnet:
id: 0
@@ -87,174 +86,174 @@ EXAMPLES = '''
community.general.one_vnet:
id: 0
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
id:
- description: The network id.
- type: int
- returned: when O(state=present)
- sample: 153
+ description: The network ID.
+ type: int
+ returned: when O(state=present)
+ sample: 153
name:
- description: The network name.
- type: str
- returned: when O(state=present)
- sample: app1
+ description: The network name.
+ type: str
+ returned: when O(state=present)
+ sample: app1
template:
- description: The parsed network template.
- type: dict
- returned: when O(state=present)
- sample:
- BRIDGE: onebr.1000
- BRIDGE_TYPE: linux
- DESCRIPTION: sampletext
- PHYDEV: eth0
- SECURITY_GROUPS: 0
- VLAN_ID: 1000
- VN_MAD: 802.1Q
+ description: The parsed network template.
+ type: dict
+ returned: when O(state=present)
+ sample:
+ BRIDGE: onebr.1000
+ BRIDGE_TYPE: linux
+ DESCRIPTION: sampletext
+ PHYDEV: eth0
+ SECURITY_GROUPS: 0
+ VLAN_ID: 1000
+ VN_MAD: 802.1Q
user_id:
- description: The network's user name.
- type: int
- returned: when O(state=present)
- sample: 1
+ description: The network's user name.
+ type: int
+ returned: when O(state=present)
+ sample: 1
user_name:
- description: The network's user id.
- type: str
- returned: when O(state=present)
- sample: oneadmin
+ description: The network's user ID.
+ type: str
+ returned: when O(state=present)
+ sample: oneadmin
group_id:
- description: The network's group id.
- type: int
- returned: when O(state=present)
- sample: 1
+ description: The network's group ID.
+ type: int
+ returned: when O(state=present)
+ sample: 1
group_name:
- description: The network's group name.
- type: str
- returned: when O(state=present)
- sample: one-users
+ description: The network's group name.
+ type: str
+ returned: when O(state=present)
+ sample: one-users
owner_id:
- description: The network's owner id.
- type: int
- returned: when O(state=present)
- sample: 143
+ description: The network's owner ID.
+ type: int
+ returned: when O(state=present)
+ sample: 143
owner_name:
- description: The network's owner name.
- type: str
- returned: when O(state=present)
- sample: ansible-test
+ description: The network's owner name.
+ type: str
+ returned: when O(state=present)
+ sample: ansible-test
permissions:
- description: The network's permissions.
- type: dict
- returned: when O(state=present)
- contains:
- owner_u:
- description: The network's owner USAGE permissions.
- type: str
- sample: 1
- owner_m:
- description: The network's owner MANAGE permissions.
- type: str
- sample: 0
- owner_a:
- description: The network's owner ADMIN permissions.
- type: str
- sample: 0
- group_u:
- description: The network's group USAGE permissions.
- type: str
- sample: 0
- group_m:
- description: The network's group MANAGE permissions.
- type: str
- sample: 0
- group_a:
- description: The network's group ADMIN permissions.
- type: str
- sample: 0
- other_u:
- description: The network's other users USAGE permissions.
- type: str
- sample: 0
- other_m:
- description: The network's other users MANAGE permissions.
- type: str
- sample: 0
- other_a:
- description: The network's other users ADMIN permissions
- type: str
- sample: 0
- sample:
- owner_u: 1
- owner_m: 0
- owner_a: 0
- group_u: 0
- group_m: 0
- group_a: 0
- other_u: 0
- other_m: 0
- other_a: 0
+ description: The network's permissions.
+ type: dict
+ returned: when O(state=present)
+ contains:
+ owner_u:
+ description: The network's owner USAGE permissions.
+ type: str
+ sample: 1
+ owner_m:
+ description: The network's owner MANAGE permissions.
+ type: str
+ sample: 0
+ owner_a:
+ description: The network's owner ADMIN permissions.
+ type: str
+ sample: 0
+ group_u:
+ description: The network's group USAGE permissions.
+ type: str
+ sample: 0
+ group_m:
+ description: The network's group MANAGE permissions.
+ type: str
+ sample: 0
+ group_a:
+ description: The network's group ADMIN permissions.
+ type: str
+ sample: 0
+ other_u:
+ description: The network's other users USAGE permissions.
+ type: str
+ sample: 0
+ other_m:
+ description: The network's other users MANAGE permissions.
+ type: str
+ sample: 0
+ other_a:
+ description: The network's other users ADMIN permissions.
+ type: str
+ sample: 0
+ sample:
+ owner_u: 1
+ owner_m: 0
+ owner_a: 0
+ group_u: 0
+ group_m: 0
+ group_a: 0
+ other_u: 0
+ other_m: 0
+ other_a: 0
clusters:
- description: The network's clusters.
- type: list
- returned: when O(state=present)
- sample: [0, 100]
+ description: The network's clusters.
+ type: list
+ returned: when O(state=present)
+ sample: [0, 100]
bridge:
- description: The network's bridge interface.
- type: str
- returned: when O(state=present)
- sample: br0
+ description: The network's bridge interface.
+ type: str
+ returned: when O(state=present)
+ sample: br0
bridge_type:
- description: The network's bridge type.
- type: str
- returned: when O(state=present)
- sample: linux
+ description: The network's bridge type.
+ type: str
+ returned: when O(state=present)
+ sample: linux
parent_network_id:
- description: The network's parent network id.
- type: int
- returned: when O(state=present)
- sample: 1
+ description: The network's parent network ID.
+ type: int
+ returned: when O(state=present)
+ sample: 1
vn_mad:
- description: The network's VN_MAD.
- type: str
- returned: when O(state=present)
- sample: bridge
+ description: The network's VN_MAD.
+ type: str
+ returned: when O(state=present)
+ sample: bridge
phydev:
- description: The network's physical device (NIC).
- type: str
- returned: when O(state=present)
- sample: eth0
+ description: The network's physical device (NIC).
+ type: str
+ returned: when O(state=present)
+ sample: eth0
vlan_id:
- description: The network's VLAN tag.
- type: int
- returned: when O(state=present)
- sample: 1000
+ description: The network's VLAN tag.
+ type: int
+ returned: when O(state=present)
+ sample: 1000
outer_vlan_id:
- description: The network's outer VLAN tag.
- type: int
- returned: when O(state=present)
- sample: 1000
+ description: The network's outer VLAN tag.
+ type: int
+ returned: when O(state=present)
+ sample: 1000
vrouters:
- description: The network's list of virtual routers IDs.
- type: list
- returned: when O(state=present)
- sample: [0, 1]
+ description: The network's list of virtual routers IDs.
+ type: list
+ returned: when O(state=present)
+ sample: [0, 1]
ar_pool:
- description: The network's list of ar_pool.
- type: list
- returned: when O(state=present)
- sample:
- - ar_id: 0
- ip: 192.0.2.1
- mac: 6c:1e:46:01:cd:d1
- size: 20
- type: IP4
- - ar_id: 1
- allocated: 0
- ip: 198.51.100.1
- mac: 5d:9b:c0:9e:f6:e5
- size: 20
- type: IP4
-'''
+ description: The network's list of ar_pool.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - ar_id: 0
+ ip: 192.0.2.1
+ mac: 6c:1e:46:01:cd:d1
+ size: 20
+ type: IP4
+ - ar_id: 1
+ allocated: 0
+ ip: 198.51.100.1
+ mac: 5d:9b:c0:9e:f6:e5
+ size: 20
+ type: IP4
+"""
from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
@@ -264,10 +263,10 @@ class NetworksModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
+ template=dict(type='str'),
)
mutually_exclusive = [
diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py
index dfcabf6f6e..eca9a8ed70 100644
--- a/plugins/modules/oneandone_firewall_policy.py
+++ b/plugins/modules/oneandone_firewall_policy.py
@@ -7,13 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_firewall_policy
short_description: Configure 1&1 firewall policy
description:
- - Create, remove, reconfigure, update firewall policies.
- This module has a dependency on 1and1 >= 1.0.
+ - Create, remove, reconfigure, update firewall policies. This module has a dependency on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,21 +26,19 @@ options:
required: false
type: str
default: 'present'
- choices: [ "present", "absent", "update" ]
+ choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
required: false
name:
description:
- - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
- maxLength=128
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128.
type: str
firewall_policy:
description:
@@ -50,61 +46,59 @@ options:
type: str
rules:
description:
- - A list of rules that will be set for the firewall policy.
- Each rule must contain protocol parameter, in addition to three optional parameters
- (port_from, port_to, and source)
+ - List of rules that are set for the firewall policy. Each rule must contain protocol parameter, in addition to three
+ optional parameters (port_from, port_to, and source).
type: list
elements: dict
default: []
add_server_ips:
description:
- - A list of server identifiers (id or name) to be assigned to a firewall policy.
- Used in combination with update state.
+ - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ - A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
add_rules:
description:
- - A list of rules that will be added to an existing firewall policy.
- It is syntax is the same as the one used for rules parameter. Used in combination with update state.
 +      - List of rules that are added to an existing firewall policy. Its syntax is the same as the one used for the rules parameter.
 +        Used in combination with update state.
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
description:
description:
- - Firewall policy description. maxLength=256
+ - Firewall policy description. maxLength=256.
type: str
required: false
wait:
description:
- - wait for the instance to be in state 'running' before returning
+ - Wait for the instance to be in state 'running' before returning.
required: false
default: true
type: bool
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the _wait_for methods
+ - Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
@@ -112,22 +106,21 @@ requirements:
- "1and1"
author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
-'''
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
name: ansible-firewall-policy
description: Testing creation of firewall policies with ansible
rules:
- -
- protocol: TCP
- port_from: 80
- port_to: 80
- source: 0.0.0.0
+ - protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
wait: true
wait_timeout: 500
@@ -150,8 +143,8 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
add_server_ips:
- - server_identifier (id or name)
- - server_identifier #2 (id or name)
+ - server_identifier (id or name)
+ - "server_identifier #2 (id or name)"
wait: true
wait_timeout: 500
state: update
@@ -161,7 +154,7 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_server_ips:
- - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
wait: true
wait_timeout: 500
state: update
@@ -172,16 +165,14 @@ EXAMPLES = '''
firewall_policy: ansible-firewall-policy-updated
description: Adding rules to an existing firewall policy
add_rules:
- -
- protocol: TCP
- port_from: 70
- port_to: 70
- source: 0.0.0.0
- -
- protocol: TCP
- port_from: 60
- port_to: 60
- source: 0.0.0.0
+ - protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ - protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
wait: true
wait_timeout: 500
state: update
@@ -191,21 +182,21 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_rules:
- - rule_id #1
- - rule_id #2
- - ...
+ - "rule_id #1"
+ - "rule_id #2"
+ - '...'
wait: true
wait_timeout: 500
state: update
-'''
+"""
-RETURN = '''
+RETURN = r"""
firewall_policy:
- description: Information about the firewall policy that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
- returned: always
-'''
+ description: Information about the firewall policy that was processed.
+ type: dict
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
+ returned: always
+"""
import os
from ansible.module_utils.basic import AnsibleModule
@@ -297,7 +288,7 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
if module.check_mode:
firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
- if (firewall_rules and firewall_policy_id):
+ if firewall_rules and firewall_policy_id:
return True
return False
diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py
index da361ef2dc..5a8ce7b8f0 100644
--- a/plugins/modules/oneandone_load_balancer.py
+++ b/plugins/modules/oneandone_load_balancer.py
@@ -7,13 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_load_balancer
short_description: Configure 1&1 load balancer
description:
- - Create, remove, update load balancers.
- This module has a dependency on 1and1 >= 1.0.
+ - Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,7 +26,7 @@ options:
type: str
required: false
default: 'present'
- choices: [ "present", "absent", "update" ]
+ choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
@@ -39,32 +37,30 @@ options:
type: str
api_url:
description:
- - Custom API URL. Overrides the
- E(ONEANDONE_API_URL) environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
required: false
name:
description:
- - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
- maxLength=128
+ - Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128.
type: str
health_check_test:
description:
- Type of the health check. At the moment, HTTP is not allowed.
type: str
- choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ choices: ["NONE", "TCP", "HTTP", "ICMP"]
health_check_interval:
description:
- - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1.
type: str
health_check_path:
description:
- - Url to call for checking. Required for HTTP health check. maxLength=1000
+ - URL to call for checking. Required for HTTP health check. maxLength=1000.
type: str
required: false
health_check_parse:
description:
- - Regular expression to check. Required for HTTP health check. maxLength=64
+ - Regular expression to check. Required for HTTP health check. maxLength=64.
type: str
required: false
persistence:
@@ -73,88 +69,87 @@ options:
type: bool
persistence_time:
description:
- - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1.
type: str
method:
description:
- Balancing procedure.
type: str
- choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
datacenter:
description:
- - ID or country code of the datacenter where the load balancer will be created.
+ - ID or country code of the datacenter where the load balancer is created.
- If not specified, it defaults to V(US).
type: str
- choices: [ "US", "ES", "DE", "GB" ]
+ choices: ["US", "ES", "DE", "GB"]
required: false
rules:
description:
- - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
- port_balancer, and port_server parameters, in addition to source parameter, which is optional.
+ - A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server
+ parameters, in addition to source parameter, which is optional.
type: list
elements: dict
default: []
description:
description:
- - Description of the load balancer. maxLength=256
+ - Description of the load balancer. maxLength=256.
type: str
required: false
add_server_ips:
description:
- - A list of server identifiers (id or name) to be assigned to a load balancer.
- Used in combination with update state.
+ - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ - A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
default: []
add_rules:
description:
- - A list of rules that will be added to an existing load balancer.
- It is syntax is the same as the one used for rules parameter. Used in combination with update state.
 +      - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for the rules parameter.
 +        Used in combination with O(state=update).
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
default: []
wait:
description:
- - wait for the instance to be in state 'running' before returning
+ - Wait for the instance to be in state 'running' before returning.
required: false
default: true
type: bool
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the _wait_for methods
+ - Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- - "1and1"
+ - "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
@@ -167,11 +162,10 @@ EXAMPLES = '''
method: ROUND_ROBIN
datacenter: US
rules:
- -
- protocol: TCP
- port_balancer: 80
- port_server: 80
- source: 0.0.0.0
+ - protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
wait: true
wait_timeout: 500
@@ -199,7 +193,7 @@ EXAMPLES = '''
load_balancer: ansible load balancer updated
description: Adding server to a load balancer with ansible
add_server_ips:
- - server identifier (id or name)
+ - server identifier (id or name)
wait: true
wait_timeout: 500
state: update
@@ -210,7 +204,7 @@ EXAMPLES = '''
load_balancer: ansible load balancer updated
description: Removing server from a load balancer with ansible
remove_server_ips:
- - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
+ - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
wait: true
wait_timeout: 500
state: update
@@ -221,16 +215,14 @@ EXAMPLES = '''
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
add_rules:
- -
- protocol: TCP
- port_balancer: 70
- port_server: 70
- source: 0.0.0.0
- -
- protocol: TCP
- port_balancer: 60
- port_server: 60
- source: 0.0.0.0
+ - protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ - protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
wait: true
wait_timeout: 500
state: update
@@ -241,21 +233,21 @@ EXAMPLES = '''
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
remove_rules:
- - rule_id #1
- - rule_id #2
- - ...
+ - "rule_id #1"
+ - "rule_id #2"
+ - '...'
wait: true
wait_timeout: 500
state: update
-'''
+"""
-RETURN = '''
+RETURN = r"""
load_balancer:
- description: Information about the load balancer that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
- returned: always
-'''
+ description: Information about the load balancer that was processed.
+ type: dict
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}
+ returned: always
+"""
import os
from ansible.module_utils.basic import AnsibleModule
@@ -352,7 +344,7 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
if module.check_mode:
lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
- if (load_balancer_rules and lb_id):
+ if load_balancer_rules and lb_id:
return True
return False
diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py
index abdf8ca7ad..2d8693156c 100644
--- a/plugins/modules/oneandone_monitoring_policy.py
+++ b/plugins/modules/oneandone_monitoring_policy.py
@@ -7,14 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_monitoring_policy
short_description: Configure 1&1 monitoring policy
description:
- - Create, remove, update monitoring policies
- (and add/remove ports, processes, and servers).
- This module has a dependency on 1and1 >= 1.0.
+ - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency
+ on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -29,20 +27,19 @@ options:
type: str
required: false
default: present
- choices: [ "present", "absent", "update" ]
+ choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
required: false
name:
description:
- - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128.
type: str
monitoring_policy:
description:
@@ -54,19 +51,18 @@ options:
type: str
email:
description:
- - User's email. maxLength=128
+ - User's email. maxLength=128.
type: str
description:
description:
- - Monitoring policy description. maxLength=256
+ - Monitoring policy description. maxLength=256.
type: str
required: false
thresholds:
description:
- - Monitoring policy thresholds. Each of the suboptions have warning and critical,
- which both have alert and value suboptions. Warning is used to set limits for
- warning alerts, critical is used to set critical alerts. alert enables alert,
- and value is used to advise when the value is exceeded.
 +      - Monitoring policy thresholds. Each of the suboptions has warning and critical, which both have alert and value suboptions.
 +        Warning is used to set limits for warning alerts, critical is used to set critical alerts. The alert suboption enables the
 +        alert, and the value suboption is used to advise when the value is exceeded.
type: list
elements: dict
default: []
@@ -93,7 +89,7 @@ options:
required: true
ports:
description:
- - Array of ports that will be monitoring.
+ - Array of ports that are to be monitored.
type: list
elements: dict
default: []
@@ -101,16 +97,16 @@ options:
protocol:
description:
- Internet protocol.
- choices: [ "TCP", "UDP" ]
+ choices: ["TCP", "UDP"]
required: true
port:
description:
- - Port number. minimum=1, maximum=65535
+ - Port number. minimum=1, maximum=65535.
required: true
alert_if:
description:
- Case of alert.
- choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ choices: ["RESPONDING", "NOT_RESPONDING"]
required: true
email_notification:
description:
@@ -118,19 +114,19 @@ options:
required: true
processes:
description:
- - Array of processes that will be monitoring.
+ - Array of processes that are to be monitored.
type: list
elements: dict
default: []
suboptions:
process:
description:
- - Name of the process. maxLength=50
+ - Name of the process. maxLength=50.
required: true
alert_if:
description:
- Case of alert.
- choices: [ "RUNNING", "NOT_RUNNING" ]
+ choices: ["RUNNING", "NOT_RUNNING"]
required: true
add_ports:
description:
@@ -190,18 +186,18 @@ options:
default: []
wait:
description:
- - wait for the instance to be in state 'running' before returning
+ - Wait for the instance to be in state 'running' before returning.
required: false
default: true
type: bool
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the _wait_for methods
+ - Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
@@ -209,11 +205,11 @@ requirements:
- "1and1"
author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
-'''
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
@@ -222,57 +218,50 @@ EXAMPLES = '''
email: your@emailaddress.com
agent: true
thresholds:
- -
- cpu:
- warning:
- value: 80
- alert: false
- critical:
- value: 92
- alert: false
- -
- ram:
- warning:
- value: 80
- alert: false
- critical:
- value: 90
- alert: false
- -
- disk:
- warning:
- value: 80
- alert: false
- critical:
- value: 90
- alert: false
- -
- internal_ping:
- warning:
- value: 50
- alert: false
- critical:
- value: 100
- alert: false
- -
- transfer:
- warning:
- value: 1000
- alert: false
- critical:
- value: 2000
- alert: false
+ - cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ - ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ - disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ - internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ - transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
ports:
- -
- protocol: TCP
- port: 22
- alert_if: RESPONDING
- email_notification: false
+ - protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
processes:
- -
- process: test
- alert_if: NOT_RUNNING
- email_notification: false
+ - process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
wait: true
- name: Destroy a monitoring policy
@@ -289,46 +278,41 @@ EXAMPLES = '''
description: Testing creation of a monitoring policy with ansible updated
email: another@emailaddress.com
thresholds:
- -
- cpu:
- warning:
- value: 70
- alert: false
- critical:
- value: 90
- alert: false
- -
- ram:
- warning:
- value: 70
- alert: false
- critical:
- value: 80
- alert: false
- -
- disk:
- warning:
- value: 70
- alert: false
- critical:
- value: 80
- alert: false
- -
- internal_ping:
- warning:
- value: 60
- alert: false
- critical:
- value: 90
- alert: false
- -
- transfer:
- warning:
- value: 900
- alert: false
- critical:
- value: 1900
- alert: false
+ - cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ - ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ - disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ - internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ - transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
wait: true
state: update
@@ -337,11 +321,10 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_ports:
- -
- protocol: TCP
- port: 33
- alert_if: RESPONDING
- email_notification: false
+ - protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
wait: true
state: update
@@ -350,18 +333,16 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_ports:
- -
- id: existing_port_id
- protocol: TCP
- port: 34
- alert_if: RESPONDING
- email_notification: false
- -
- id: existing_port_id
- protocol: TCP
- port: 23
- alert_if: RESPONDING
- email_notification: false
+ - id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ - id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
wait: true
state: update
@@ -370,7 +351,7 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_ports:
- - port_id
+ - port_id
state: update
- name: Add a process to a monitoring policy
@@ -378,10 +359,9 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_processes:
- -
- process: test_2
- alert_if: NOT_RUNNING
- email_notification: false
+ - process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
wait: true
state: update
@@ -390,16 +370,14 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_processes:
- -
- id: process_id
- process: test_1
- alert_if: NOT_RUNNING
- email_notification: false
- -
- id: process_id
- process: test_3
- alert_if: NOT_RUNNING
- email_notification: false
+ - id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ - id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
wait: true
state: update
@@ -408,7 +386,7 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_processes:
- - process_id
+ - process_id
wait: true
state: update
@@ -417,7 +395,7 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_servers:
- - server id or name
+ - server id or name
wait: true
state: update
@@ -426,18 +404,18 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_servers:
- - server01
+ - server01
wait: true
state: update
-'''
+"""
-RETURN = '''
+RETURN = r"""
monitoring_policy:
- description: Information about the monitoring policy that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
- returned: always
-'''
+ description: Information about the monitoring policy that was processed.
+ type: dict
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
+ returned: always
+"""
import os
from ansible.module_utils.basic import AnsibleModule
@@ -559,7 +537,7 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
if module.check_mode:
mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
- if (monitoring_policy_processes and mp_id):
+ if monitoring_policy_processes and mp_id:
return True
return False
diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py
index cf74597edb..f39c464f96 100644
--- a/plugins/modules/oneandone_private_network.py
+++ b/plugins/modules/oneandone_private_network.py
@@ -7,13 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_private_network
short_description: Configure 1&1 private networking
description:
- - Create, remove, reconfigure, update a private network.
- This module has a dependency on 1and1 >= 1.0.
+ - Create, remove, reconfigure, update a private network. This module has a dependency on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,7 +26,7 @@ options:
type: str
required: false
default: 'present'
- choices: [ "present", "absent", "update" ]
+ choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
@@ -39,8 +37,7 @@ options:
type: str
api_url:
description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
required: false
name:
@@ -53,16 +50,16 @@ options:
type: str
datacenter:
description:
- - The identifier of the datacenter where the private network will be created
+ - The identifier of the datacenter where the private network is created.
type: str
choices: [US, ES, DE, GB]
network_address:
description:
- - Set a private network space, i.e. 192.168.1.0
+ - Set a private network space, for example V(192.168.1.0).
type: str
subnet_mask:
description:
- - Set the netmask for the private network, i.e. 255.255.255.0
+ - Set the netmask for the private network, for example V(255.255.255.0).
type: str
add_members:
description:
@@ -78,30 +75,30 @@ options:
default: []
wait:
description:
- - wait for the instance to be in state 'running' before returning
+ - Wait for the instance to be in state 'running' before returning.
required: false
default: true
type: bool
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the _wait_for methods
+ - Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- - "1and1"
+ - "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
@@ -131,7 +128,7 @@ EXAMPLES = '''
state: update
private_network: backup_network
add_members:
- - server identifier (id or name)
+ - server identifier (id or name)
- name: Remove members from the private network
community.general.oneandone_private_network:
@@ -139,16 +136,16 @@ EXAMPLES = '''
state: update
private_network: backup_network
remove_members:
- - server identifier (id or name)
-'''
+ - server identifier (id or name)
+"""
-RETURN = '''
+RETURN = r"""
private_network:
- description: Information about the private network.
- type: dict
- sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
- returned: always
-'''
+ description: Information about the private network.
+ type: dict
+ sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}
+ returned: always
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py
index 2dceb41bff..b6b49c5b4a 100644
--- a/plugins/modules/oneandone_public_ip.py
+++ b/plugins/modules/oneandone_public_ip.py
@@ -7,13 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_public_ip
short_description: Configure 1&1 public IPs
description:
- - Create, update, and remove public IPs.
- This module has a dependency on 1and1 >= 1.0.
+ - Create, update, and remove public IPs. This module has a dependency on 1and1 >= 1.0.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -24,29 +22,28 @@ attributes:
options:
state:
description:
- - Define a public ip state to create, remove, or update.
+ - Define a public IP state to create, remove, or update.
type: str
required: false
default: 'present'
- choices: [ "present", "absent", "update" ]
+ choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
required: false
reverse_dns:
description:
- - Reverse DNS name. maxLength=256
+ - Reverse DNS name. maxLength=256.
type: str
required: false
datacenter:
description:
- - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ - ID of the datacenter where the IP is created (only for unassigned IPs).
type: str
choices: [US, ES, DE, GB]
default: US
@@ -64,30 +61,30 @@ options:
type: str
wait:
description:
- - wait for the instance to be in state 'running' before returning
+ - Wait for the instance to be in state 'running' before returning.
required: false
default: true
type: bool
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the _wait_for methods
+ - Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- - "1and1"
+ - "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a public IP
community.general.oneandone_public_ip:
auth_token: oneandone_private_api_key
@@ -107,15 +104,15 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
public_ip_id: public ip id
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
public_ip:
- description: Information about the public ip that was processed
- type: dict
- sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
- returned: always
-'''
+ description: Information about the public IP that was processed.
+ type: dict
+ sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}
+ returned: always
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py
index b6653b48b1..7683ea1480 100644
--- a/plugins/modules/oneandone_server.py
+++ b/plugins/modules/oneandone_server.py
@@ -7,13 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneandone_server
short_description: Create, destroy, start, stop, and reboot a 1&1 Host server
description:
- - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
- When the server is created it can optionally wait for it to be 'running' before returning.
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait
+ for it to be 'running' before returning.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -27,23 +26,21 @@ options:
- Define a server's state to create, remove, start or stop it.
type: str
default: present
- choices: [ "present", "absent", "running", "stopped" ]
+ choices: ["present", "absent", "running", "stopped"]
auth_token:
description:
- - Authenticating API token provided by 1&1. Overrides the
- ONEANDONE_AUTH_TOKEN environment variable.
+ - Authenticating API token provided by 1&1. Overrides the E(ONEANDONE_AUTH_TOKEN) environment variable.
type: str
api_url:
description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
datacenter:
description:
- The datacenter location.
type: str
default: US
- choices: [ "US", "ES", "DE", "GB" ]
+ choices: ["US", "ES", "DE", "GB"]
hostname:
description:
- The hostname or ID of the server. Only used when state is 'present'.
@@ -54,35 +51,30 @@ options:
type: str
appliance:
description:
- - The operating system name or ID for the server.
- It is required only for 'present' state.
+ - The operating system name or ID for the server. It is required only for 'present' state.
type: str
fixed_instance_size:
description:
- - The instance size name or ID of the server.
- It is required only for 'present' state, and it is mutually exclusive with
- vcore, cores_per_processor, ram, and hdds parameters.
- - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL)'
+ - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive
+ with vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).'
type: str
vcore:
description:
- - The total number of processors.
- It must be provided with cores_per_processor, ram, and hdds parameters.
+ - The total number of processors. It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters.
type: int
cores_per_processor:
description:
- - The number of cores per processor.
- It must be provided with vcore, ram, and hdds parameters.
+ - The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters.
type: int
ram:
description:
- - The amount of RAM memory.
- It must be provided with with vcore, cores_per_processor, and hdds parameters.
 + - The amount of RAM memory. It must be provided with O(vcore), O(cores_per_processor), and O(hdds) parameters.
type: float
hdds:
description:
- - A list of hard disks with nested "size" and "is_main" properties.
- It must be provided with vcore, cores_per_processor, and ram parameters.
+ - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided
+ with O(vcore), O(cores_per_processor), and O(ram) parameters.
type: list
elements: dict
private_network:
@@ -119,30 +111,27 @@ options:
- The type of server to be built.
type: str
default: "cloud"
- choices: [ "cloud", "baremetal", "k8s_node" ]
+ choices: ["cloud", "baremetal", "k8s_node"]
wait:
description:
- - Wait for the server to be in state 'running' before returning.
- Also used for delete operation (set to 'false' if you don't want to wait
- for each individual server to be deleted before moving on with
- other tasks.)
+ - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if
+ you do not want to wait for each individual server to be deleted before moving on with other tasks).
type: bool
default: true
wait_timeout:
description:
- - how long before wait gives up, in seconds
+ - How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- - Defines the number of seconds to wait when using the wait_for methods
+ - Defines the number of seconds to wait when using the wait_for methods.
type: int
default: 5
auto_increment:
description:
- - When creating multiple servers at once, whether to differentiate
- hostnames by appending a count after them or substituting the count
- where there is a %02d or %03d in the hostname string.
+ - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting
+ the count where there is a %02d or %03d in the hostname string.
type: bool
default: true
@@ -150,12 +139,11 @@ requirements:
- "1and1"
author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create three servers and enumerate their names
community.general.oneandone_server:
auth_token: oneandone_private_api_key
@@ -201,16 +189,16 @@ EXAMPLES = '''
auth_token: oneandone_private_api_key
state: stopped
server: 'node01'
-'''
+"""
-RETURN = '''
+RETURN = r"""
servers:
- description: Information about each server that was processed
- type: list
- sample:
- - {"hostname": "my-server", "id": "server-id"}
- returned: always
-'''
+ description: Information about each server that was processed.
+ type: list
+ sample:
+ - {"hostname": "my-server", "id": "server-id"}
+ returned: always
+"""
import os
import time
@@ -530,7 +518,7 @@ def startstop_server(module, oneandone_conn):
# Resolve server
server = get_server(oneandone_conn, server_id, True)
if server:
- # Attempt to change the server state, only if it's not already there
+ # Attempt to change the server state, only if it is not already there
# or on its way.
try:
if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py
index b63352790f..5689d28fe6 100644
--- a/plugins/modules/onepassword_info.py
+++ b/plugins/modules/onepassword_info.py
@@ -12,108 +12,110 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: onepassword_info
author:
- - Ryan Conway (@Rylon)
+ - Ryan Conway (@Rylon)
requirements:
- - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
notes:
- - Tested with C(op) version 0.5.5
- - "Based on the P(community.general.onepassword#lookup) lookup plugin by Scott Buchanan ."
+ - Tested with C(op) version 0.5.5.
+ - Based on the P(community.general.onepassword#lookup) lookup plugin by Scott Buchanan .
short_description: Gather items from 1Password
description:
- - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
- - A fatal error occurs if any of the items being searched for can not be found.
- - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for can not be found.
+ - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
extends_documentation_fragment:
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.attributes
+ - community.general.attributes.info_module
options:
- search_terms:
- type: list
- elements: dict
+ search_terms:
+ type: list
+ elements: dict
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, O(search_terms[].field) is assumed to be V(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
description:
- - A list of one or more search terms.
- - Each search term can either be a simple string or it can be a dictionary for more control.
- - When passing a simple string, O(search_terms[].field) is assumed to be V(password).
- - When passing a dictionary, the following fields are available.
- suboptions:
- name:
- type: str
- description:
- - The name of the 1Password item to search for (required).
- field:
- type: str
- description:
- - The name of the field to search for within this item (optional, defaults to "password" (or "document" if the item has an attachment).
- section:
- type: str
- description:
- - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
- vault:
- type: str
- description:
- - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to V(password), or V(document) if the
+ item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, it searches all sections if not
+ specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults
+ (optional).
+ required: true
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, the module attempts to sign in to 1Password automatically.
+ - Without this option, you must have already logged in using the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (V(subdomain).1password.com).
+ - If this is not specified, the most recent subdomain is used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying O(auto_login).
required: true
- auto_login:
- type: dict
+ secret_key:
+ type: str
description:
- - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
- will attempt to sign in to 1Password automatically.
- - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
- - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
- the Ansible Vault is equal to or greater in strength than the 1Password master password.
- suboptions:
- subdomain:
- type: str
- description:
- - 1Password subdomain name (.1password.com).
- - If this is not specified, the most recent subdomain will be used.
- username:
- type: str
- description:
- - 1Password username.
- - Only required for initial sign in.
- master_password:
- type: str
- description:
- - The master password for your subdomain.
- - This is always required when specifying O(auto_login).
- required: true
- secret_key:
- type: str
- description:
- - The secret key for your subdomain.
- - Only required for initial sign in.
- required: false
- cli_path:
- type: path
- description: Used to specify the exact path to the C(op) command line interface
- required: false
- default: 'op'
-'''
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ required: false
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface.
+ required: false
+ default: 'op'
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Gather secrets from 1Password, assuming there is a 'password' field:
- name: Get a password
community.general.onepassword_info:
search_terms: My 1Password item
delegate_to: localhost
register: my_1password_item
- no_log: true # Don't want to log the secrets to the console!
+ no_log: true # Don't want to log the secrets to the console!
# Gather secrets from 1Password, with more advanced search terms:
- name: Get a password
community.general.onepassword_info:
search_terms:
- - name: My 1Password item
- field: Custom field name # optional, defaults to 'password'
- section: Custom section name # optional, defaults to 'None'
- vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
delegate_to: localhost
register: my_1password_item
- no_log: true # Don't want to log the secrets to the console!
+ no_log: true # Don't want to log the secrets to the console!
# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
@@ -121,39 +123,39 @@ EXAMPLES = '''
- name: Get a password
community.general.onepassword_info:
search_terms:
- - My 1Password item # 'name' is optional when passing a simple string...
- - name: My Other 1Password item # ...but it can also be set for consistency
- - name: My 1Password item
- field: Custom field name # optional, defaults to 'password'
- section: Custom section name # optional, defaults to 'None'
- vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
- name: A 1Password item with document attachment
delegate_to: localhost
register: my_1password_item
- no_log: true # Don't want to log the secrets to the console!
+ no_log: true # Don't want to log the secrets to the console!
- name: Debug a password (for example)
ansible.builtin.debug:
msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
-'''
+"""
-RETURN = '''
----
+RETURN = r"""
# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
# This shows the response you would expect to receive from the third example documented above.
onepassword:
- description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
- returned: success
- type: dict
- sample:
- "My 1Password item":
- password: the value of this field
- Custom field name: the value of this field
- "My Other 1Password item":
- password: the value of this field
- "A 1Password item with document attachment":
- document: the contents of the document attached to this item
-'''
 + description: Dictionary of each 1Password item matching the given search terms, shows what would be returned from the third
 + example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+"""
import errno
@@ -206,7 +208,7 @@ class OnePasswordInfo(object):
def _parse_field(self, data_json, item_id, field_name, section_title=None):
data = json.loads(data_json)
- if ('documentAttributes' in data['details']):
+ if 'documentAttributes' in data['details']:
# This is actually a document, let's fetch the document data instead!
document = self._run(["get", "document", data['overview']['title']])
return {'document': document[1].strip()}
@@ -216,7 +218,7 @@ class OnePasswordInfo(object):
# Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
# not inside it, so we need to check there first.
- if (field_name in data['details']):
+ if field_name in data['details']:
return {field_name: data['details'][field_name]}
# Otherwise we continue looking inside the 'fields' attribute for the specified field.
@@ -372,7 +374,7 @@ def main():
username=dict(type='str'),
master_password=dict(required=True, type='str', no_log=True),
secret_key=dict(type='str', no_log=True),
- ), default=None),
+ )),
search_terms=dict(required=True, type='list', elements='dict'),
),
supports_check_mode=True
diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py
index ed04e2279f..1ca33023db 100644
--- a/plugins/modules/oneview_datacenter_info.py
+++ b/plugins/modules/oneview_datacenter_info.py
@@ -7,43 +7,41 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_datacenter_info
short_description: Retrieve information about the OneView Data Centers
description:
- - Retrieve information about the OneView Data Centers.
+ - Retrieve information about the OneView Data Centers.
requirements:
- - "hpOneView >= 2.0.1"
+ - "hpOneView >= 2.0.1"
author:
- - Alex Monteiro (@aalexmonteiro)
- - Madhav Bharadwaj (@madhav-bharadwaj)
- - Priyanka Sood (@soodpr)
- - Ricardo Galeno (@ricardogpsf)
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Data Center name.
- type: str
- options:
- description:
- - "Retrieve additional information. Options available: 'visualContent'."
- type: list
- elements: str
+ name:
+ description:
+ - Data Center name.
+ type: str
+ options:
+ description:
+ - 'Retrieve additional information. Options available: V(visualContent).'
+ type: list
+ elements: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Data Centers
community.general.oneview_datacenter_info:
hostname: 172.16.101.48
@@ -107,19 +105,19 @@ EXAMPLES = '''
- name: Print fetched information about Data Center Visual Content
ansible.builtin.debug:
msg: "{{ result.datacenter_visual_content }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
datacenters:
- description: Has all the OneView information about the Data Centers.
- returned: Always, but can be null.
- type: dict
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
datacenter_visual_content:
- description: Has information about the Data Center Visual Content.
- returned: When requested, but can be null.
- type: dict
-'''
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py
index 4e203a50ac..05992ee501 100644
--- a/plugins/modules/oneview_enclosure_info.py
+++ b/plugins/modules/oneview_enclosure_info.py
@@ -8,44 +8,41 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_enclosure_info
short_description: Retrieve information about one or more Enclosures
description:
- - Retrieve information about one or more of the Enclosures from OneView.
+ - Retrieve information about one or more of the Enclosures from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Enclosure name.
- type: str
- options:
- description:
- - "List with options to gather additional information about an Enclosure and related resources.
- Options allowed: V(script), V(environmentalConfiguration), and V(utilization). For the option V(utilization),
- you can provide specific parameters."
- type: list
- elements: raw
+ name:
+ description:
+ - Enclosure name.
+ type: str
+ options:
+ description:
+ - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script),
+ V(environmentalConfiguration), and V(utilization). For the option V(utilization), you can provide specific parameters.'
+ type: list
+ elements: raw
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Enclosures
community.general.oneview_enclosure_info:
hostname: 172.16.101.48
@@ -98,9 +95,9 @@ EXAMPLES = '''
community.general.oneview_enclosure_info:
name: Test-Enclosure
options:
- - script # optional
- - environmentalConfiguration # optional
- - utilization # optional
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
hostname: 172.16.101.48
username: administrator
password: my_password
@@ -126,11 +123,11 @@ EXAMPLES = '''
msg: "{{ result.enclosure_utilization }}"
- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
- specified dates"
+ specified dates"
community.general.oneview_enclosure_info:
name: Test-Enclosure
options:
- - utilization: # optional
+ - utilization: # optional
fields: AmbientTemperature
filter:
- startDate=2016-07-01T14:29:42.000Z
@@ -152,29 +149,29 @@ EXAMPLES = '''
- name: Print fetched information about Enclosure Utilization
ansible.builtin.debug:
msg: "{{ result.enclosure_utilization }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
enclosures:
- description: Has all the OneView information about the Enclosures.
- returned: Always, but can be null.
- type: dict
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
enclosure_script:
- description: Has all the OneView information about the script of an Enclosure.
- returned: When requested, but can be null.
- type: str
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
enclosure_environmental_configuration:
- description: Has all the OneView information about the environmental configuration of an Enclosure.
- returned: When requested, but can be null.
- type: dict
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
enclosure_utilization:
- description: Has all the OneView information about the utilization of an Enclosure.
- returned: When requested, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py
index 981d949cdc..7ba3abb6e4 100644
--- a/plugins/modules/oneview_ethernet_network.py
+++ b/plugins/modules/oneview_ethernet_network.py
@@ -7,46 +7,44 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_ethernet_network
short_description: Manage OneView Ethernet Network resources
description:
- - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
requirements:
- - hpOneView >= 3.1.0
+ - hpOneView >= 3.1.0
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the Ethernet Network resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
- - V(default_bandwidth_reset) will reset the network connection template to the default.
- type: str
- default: present
- choices: [present, absent, default_bandwidth_reset]
- data:
- description:
- - List with Ethernet Network properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(default_bandwidth_reset) resets the network connection template to the default.
+ type: str
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - List with Ethernet Network properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure that the Ethernet Network is present using the default configuration
community.general.oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
@@ -64,8 +62,8 @@ EXAMPLES = '''
name: 'Test Ethernet Network'
purpose: Management
bandwidth:
- maximumBandwidth: 3000
- typicalBandwidth: 2000
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
delegate_to: localhost
- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
@@ -107,24 +105,24 @@ EXAMPLES = '''
data:
name: 'Test Ethernet Network'
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
ethernet_network:
- description: Has the facts about the Ethernet Networks.
- returned: On state 'present'. Can be null.
- type: dict
+ description: Has the facts about the Ethernet Networks.
+ returned: On O(state=present). Can be null.
+ type: dict
ethernet_network_bulk:
- description: Has the facts about the Ethernet Networks affected by the bulk insert.
- returned: When 'vlanIdRange' attribute is in data argument. Can be null.
- type: dict
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When V(vlanIdRange) attribute is in O(data) argument. Can be null.
+ type: dict
ethernet_network_connection_template:
- description: Has the facts about the Ethernet Network Connection Template.
- returned: On state 'default_bandwidth_reset'. Can be null.
- type: dict
-'''
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On O(state=default_bandwidth_reset). Can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py
index 7da008b04e..c1c0a327fe 100644
--- a/plugins/modules/oneview_ethernet_network_info.py
+++ b/plugins/modules/oneview_ethernet_network_info.py
@@ -7,42 +7,40 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_ethernet_network_info
short_description: Retrieve the information about one or more of the OneView Ethernet Networks
description:
- - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Ethernet Network name.
- type: str
- options:
- description:
- - "List with options to gather additional information about an Ethernet Network and related resources.
- Options allowed: V(associatedProfiles) and V(associatedUplinkGroups)."
- type: list
- elements: str
+ name:
+ description:
+ - Ethernet Network name.
+ type: str
+ options:
+ description:
+ - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed:
+ V(associatedProfiles) and V(associatedUplinkGroups).'
+ type: list
+ elements: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Ethernet Networks
community.general.oneview_ethernet_network_info:
config: /etc/oneview/oneview_config.json
@@ -96,24 +94,24 @@ EXAMPLES = '''
- name: Print fetched information about Ethernet Network Associated Uplink Groups
ansible.builtin.debug:
msg: "{{ result.enet_associated_uplink_groups }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
ethernet_networks:
- description: Has all the OneView information about the Ethernet Networks.
- returned: Always, but can be null.
- type: dict
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
enet_associated_profiles:
- description: Has all the OneView information about the profiles which are using the Ethernet network.
- returned: When requested, but can be null.
- type: dict
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
enet_associated_uplink_groups:
- description: Has all the OneView information about the uplink sets which are using the Ethernet network.
- returned: When requested, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py
index 9f0c4358b7..3063e80757 100644
--- a/plugins/modules/oneview_fc_network.py
+++ b/plugins/modules/oneview_fc_network.py
@@ -7,43 +7,41 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_fc_network
short_description: Manage OneView Fibre Channel Network resources
description:
- - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
requirements:
- - "hpOneView >= 4.0.0"
+ - "hpOneView >= 4.0.0"
author: "Felipe Bulsoni (@fgbulsoni)"
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the Fibre Channel Network resource.
- V(present) will ensure data properties are compliant with OneView.
- V(absent) will remove the resource from OneView, if it exists.
- type: str
- choices: ['present', 'absent']
- required: true
- data:
- description:
- - List with the Fibre Channel Network properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ type: str
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - List with the Fibre Channel Network properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure that the Fibre Channel Network is present using the default configuration
community.general.oneview_fc_network:
config: "{{ config_file_path }}"
@@ -75,14 +73,14 @@ EXAMPLES = '''
state: absent
data:
name: 'New FC Network'
-'''
+"""
-RETURN = '''
+RETURN = r"""
fc_network:
- description: Has the facts about the managed OneView FC Network.
- returned: On state 'present'. Can be null.
- type: dict
-'''
+ description: Has the facts about the managed OneView FC Network.
+ returned: On O(state=present). Can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py
index 096af48308..9de22ef55c 100644
--- a/plugins/modules/oneview_fc_network_info.py
+++ b/plugins/modules/oneview_fc_network_info.py
@@ -7,37 +7,35 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_fc_network_info
short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
description:
- - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Fibre Channel Network name.
- type: str
+ name:
+ description:
+ - Fibre Channel Network name.
+ type: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Fibre Channel Networks
community.general.oneview_fc_network_info:
config: /etc/oneview/oneview_config.json
@@ -73,14 +71,14 @@ EXAMPLES = '''
- name: Print fetched information about Fibre Channel Network found by name
ansible.builtin.debug:
msg: "{{ result.fc_networks }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
fc_networks:
- description: Has all the OneView information about the Fibre Channel Networks.
- returned: Always, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
@@ -89,8 +87,8 @@ class FcNetworkInfoModule(OneViewModuleBase):
def __init__(self):
argument_spec = dict(
- name=dict(required=False, type='str'),
- params=dict(required=False, type='dict')
+ name=dict(type='str'),
+ params=dict(type='dict')
)
super(FcNetworkInfoModule, self).__init__(
diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py
index e1216b1d95..37fbff9ef4 100644
--- a/plugins/modules/oneview_fcoe_network.py
+++ b/plugins/modules/oneview_fcoe_network.py
@@ -7,44 +7,42 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_fcoe_network
short_description: Manage OneView FCoE Network resources
description:
- - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
requirements:
- - "Python >= 2.7.9"
- - "hpOneView >= 4.0.0"
+ - "Python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
author: "Felipe Bulsoni (@fgbulsoni)"
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the FCoE Network resource.
- V(present) will ensure data properties are compliant with OneView.
- V(absent) will remove the resource from OneView, if it exists.
- type: str
- default: present
- choices: ['present', 'absent']
- data:
- description:
- - List with FCoE Network properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - List with FCoE Network properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure that FCoE Network is present using the default configuration
community.general.oneview_fcoe_network:
config: '/etc/oneview/oneview_config.json'
@@ -72,14 +70,14 @@ EXAMPLES = '''
data:
name: New FCoE Network
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
fcoe_network:
- description: Has the facts about the OneView FCoE Networks.
- returned: On state 'present'. Can be null.
- type: dict
-'''
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On O(state=present). Can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py
index b3460d59aa..6d5074be4a 100644
--- a/plugins/modules/oneview_fcoe_network_info.py
+++ b/plugins/modules/oneview_fcoe_network_info.py
@@ -7,36 +7,34 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_fcoe_network_info
short_description: Retrieve the information about one or more of the OneView FCoE Networks
description:
- - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - FCoE Network name.
- type: str
+ name:
+ description:
+ - FCoE Network name.
+ type: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all FCoE Networks
community.general.oneview_fcoe_network_info:
config: /etc/oneview/oneview_config.json
@@ -72,14 +70,14 @@ EXAMPLES = '''
- name: Print fetched information about FCoE Network found by name
ansible.builtin.debug:
msg: "{{ result.fcoe_networks }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
fcoe_networks:
- description: Has all the OneView information about the FCoE Networks.
- returned: Always, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py
index d1303f011a..2683fc5468 100644
--- a/plugins/modules/oneview_logical_interconnect_group.py
+++ b/plugins/modules/oneview_logical_interconnect_group.py
@@ -8,45 +8,43 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_logical_interconnect_group
short_description: Manage OneView Logical Interconnect Group resources
description:
- - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
requirements:
- - hpOneView >= 4.0.0
+ - hpOneView >= 4.0.0
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the Logical Interconnect Group resource.
- V(absent) will remove the resource from OneView, if it exists.
- V(present) will ensure data properties are compliant with OneView.
- type: str
- choices: [absent, present]
- default: present
- data:
- description:
- - List with the Logical Interconnect Group properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(present) ensures data properties are compliant with OneView.
+ type: str
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+ - List with the Logical Interconnect Group properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Ensure that the Logical Interconnect Group is present
community.general.oneview_logical_interconnect_group:
config: /etc/oneview/oneview_config.json
@@ -57,13 +55,13 @@ EXAMPLES = '''
enclosureType: C7000
interconnectMapTemplate:
interconnectMapEntryTemplates:
- - logicalDownlinkUri: ~
+ - logicalDownlinkUri:
logicalLocation:
- locationEntries:
- - relativeValue: 1
- type: Bay
- - relativeValue: 1
- type: Enclosure
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
permittedInterconnectTypeName: HP VC Flex-10/10D Module
# Alternatively you can inform permittedInterconnectTypeUri
delegate_to: localhost
@@ -95,14 +93,14 @@ EXAMPLES = '''
data:
name: New Logical Interconnect Group
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
logical_interconnect_group:
- description: Has the facts about the OneView Logical Interconnect Group.
- returned: On state 'present'. Can be null.
- type: dict
-'''
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On O(state=present). Can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
diff --git a/plugins/modules/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py
index 6f6a908f29..1c9e415d0e 100644
--- a/plugins/modules/oneview_logical_interconnect_group_info.py
+++ b/plugins/modules/oneview_logical_interconnect_group_info.py
@@ -8,36 +8,34 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_logical_interconnect_group_info
short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
description:
- - Retrieve information about one or more of the Logical Interconnect Groups from OneView
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Logical Interconnect Group name.
- type: str
+ name:
+ description:
+ - Logical Interconnect Group name.
+ type: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Logical Interconnect Groups
community.general.oneview_logical_interconnect_group_info:
hostname: 172.16.101.48
@@ -85,14 +83,14 @@ EXAMPLES = '''
- name: Print fetched information about Logical Interconnect Group found by name
ansible.builtin.debug:
msg: "{{ result.logical_interconnect_groups }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
logical_interconnect_groups:
- description: Has all the OneView information about the Logical Interconnect Groups.
- returned: Always, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py
index 0efd417d63..ee5d3560a7 100644
--- a/plugins/modules/oneview_network_set.py
+++ b/plugins/modules/oneview_network_set.py
@@ -7,46 +7,44 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_network_set
short_description: Manage HPE OneView Network Set resources
description:
- - Provides an interface to manage Network Set resources. Can create, update, or delete.
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
requirements:
- - hpOneView >= 4.0.0
+ - hpOneView >= 4.0.0
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the Network Set resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
- type: str
- default: present
- choices: ['present', 'absent']
- data:
- description:
- - List with the Network Set properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - List with the Network Set properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a Network Set
community.general.oneview_network_set:
config: /etc/oneview/oneview_config.json
@@ -54,8 +52,8 @@ EXAMPLES = '''
data:
name: OneViewSDK Test Network Set
networkUris:
- - Test Ethernet Network_1 # can be a name
- - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
delegate_to: localhost
- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
@@ -74,7 +72,7 @@ EXAMPLES = '''
config: /etc/oneview/oneview_config.json
state: absent
data:
- name: OneViewSDK Test Network Set - Renamed
+ name: OneViewSDK Test Network Set - Renamed
delegate_to: localhost
- name: Update the Network set with two scopes
@@ -87,14 +85,14 @@ EXAMPLES = '''
- /rest/scopes/01SC123456
- /rest/scopes/02SC123456
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
network_set:
- description: Has the facts about the Network Set.
- returned: On state 'present', but can be null.
- type: dict
-'''
+ description: Has the facts about the Network Set.
+ returned: On O(state=present), but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py
index cef53d8fcd..51e7d0b510 100644
--- a/plugins/modules/oneview_network_set_info.py
+++ b/plugins/modules/oneview_network_set_info.py
@@ -7,45 +7,42 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_network_set_info
short_description: Retrieve information about the OneView Network Sets
description:
- - Retrieve information about the Network Sets from OneView.
+ - Retrieve information about the Network Sets from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- name:
- description:
- - Network Set name.
- type: str
+ name:
+ description:
+ - Network Set name.
+ type: str
- options:
- description:
- - "List with options to gather information about Network Set.
- Option allowed: V(withoutEthernet).
- The option V(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
- type: list
- elements: str
+ options:
+ description:
+ - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). The option V(withoutEthernet)
+ retrieves the list of network_sets excluding Ethernet networks.'
+ type: list
+ elements: str
extends_documentation_fragment:
- community.general.oneview
- community.general.oneview.factsparams
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all Network Sets
community.general.oneview_network_set_info:
hostname: 172.16.101.48
@@ -86,7 +83,7 @@ EXAMPLES = '''
password: my_password
api_version: 500
options:
- - withoutEthernet
+ - withoutEthernet
no_log: true
delegate_to: localhost
register: result
@@ -118,7 +115,7 @@ EXAMPLES = '''
api_version: 500
name: Name of the Network Set
options:
- - withoutEthernet
+ - withoutEthernet
no_log: true
delegate_to: localhost
register: result
@@ -126,14 +123,14 @@ EXAMPLES = '''
- name: Print fetched information about Network Set found by name, excluding Ethernet networks
ansible.builtin.debug:
msg: "{{ result.network_sets }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
network_sets:
- description: Has all the OneView information about the Network Sets.
- returned: Always, but can be empty.
- type: dict
-'''
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py
index 15282aec21..23732cdaaf 100644
--- a/plugins/modules/oneview_san_manager.py
+++ b/plugins/modules/oneview_san_manager.py
@@ -7,47 +7,45 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_san_manager
short_description: Manage OneView SAN Manager resources
description:
- - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
requirements:
- - hpOneView >= 3.1.1
+ - hpOneView >= 3.1.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- description:
- - Indicates the desired state for the Uplink Set resource.
- - V(present) ensures data properties are compliant with OneView.
- - V(absent) removes the resource from OneView, if it exists.
- - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
- type: str
- default: present
- choices: [present, absent, connection_information_set]
- data:
- description:
- - List with SAN Manager properties.
- type: dict
- required: true
+ state:
+ description:
+ - Indicates the desired state for the Uplink Set resource.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ type: str
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+ - List with SAN Manager properties.
+ type: dict
+ required: true
extends_documentation_fragment:
- - community.general.oneview
- - community.general.oneview.validateetag
- - community.general.attributes
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
community.general.oneview_san_manager:
config: /etc/oneview/oneview_config.json
@@ -123,14 +121,14 @@ EXAMPLES = '''
data:
name: '172.18.15.1'
delegate_to: localhost
-'''
+"""
-RETURN = '''
+RETURN = r"""
san_manager:
- description: Has the OneView facts about the SAN Manager.
- returned: On state 'present'. Can be null.
- type: dict
-'''
+ description: Has the OneView facts about the SAN Manager.
+ returned: On O(state=present). Can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
diff --git a/plugins/modules/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py
index f994280ca8..63797e298e 100644
--- a/plugins/modules/oneview_san_manager_info.py
+++ b/plugins/modules/oneview_san_manager_info.py
@@ -7,44 +7,42 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: oneview_san_manager_info
short_description: Retrieve information about one or more of the OneView SAN Managers
description:
- - Retrieve information about one or more of the SAN Managers from OneView
+ - Retrieve information about one or more of the SAN Managers from OneView.
requirements:
- - hpOneView >= 2.0.1
+ - hpOneView >= 2.0.1
author:
- - Felipe Bulsoni (@fgbulsoni)
- - Thiago Miotto (@tmiotto)
- - Adriane Cardozo (@adriane-cardozo)
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
attributes:
- check_mode:
- version_added: 3.3.0
- # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
- provider_display_name:
- description:
- - Provider Display Name.
- type: str
- params:
- description:
- - List of params to delimit, filter and sort the list of resources.
- - "params allowed:
- - V(start): The first item to return, using 0-based indexing.
- - V(count): The number of resources to return.
- - V(query): A general query string to narrow the list of resources returned.
- - V(sort): The sort order of the returned data set."
- type: dict
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ type: str
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - 'Params allowed:'
+ - 'V(start): The first item to return, using 0-based indexing.'
+ - 'V(count): The number of resources to return.'
+ - 'V(query): A general query string to narrow the list of resources returned.'
+ - 'V(sort): The sort order of the returned data set.'
+ type: dict
extends_documentation_fragment:
- community.general.oneview
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Gather information about all SAN Managers
community.general.oneview_san_manager_info:
config: /etc/oneview/oneview_config.json
@@ -80,14 +78,14 @@ EXAMPLES = '''
- name: Print fetched information about SAN Manager found by provider display name
ansible.builtin.debug:
msg: "{{ result.san_managers }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
san_managers:
- description: Has all the OneView information about the SAN Managers.
- returned: Always, but can be null.
- type: dict
-'''
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+"""
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py
index f6d03cb275..a06dae1926 100644
--- a/plugins/modules/online_server_info.py
+++ b/plugins/modules/online_server_info.py
@@ -8,23 +8,21 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: online_server_info
short_description: Gather information about Online servers
description:
- Gather information about the servers.
- - U(https://www.online.net/en/dedicated-server)
+ - U(https://www.online.net/en/dedicated-server).
author:
- "Remy Leone (@remyleone)"
extends_documentation_fragment:
- community.general.online
- community.general.attributes
- community.general.attributes.info_module
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Gather Online server information
community.general.online_server_info:
api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
@@ -32,105 +30,105 @@ EXAMPLES = r'''
- ansible.builtin.debug:
msg: "{{ result.online_server_info }}"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
online_server_info:
description:
- Response from Online API.
- - "For more details please refer to: U(https://console.online.net/en/api/)."
+ - 'For more details please refer to: U(https://console.online.net/en/api/).'
returned: success
type: list
elements: dict
sample:
- "online_server_info": [
- {
- "abuse": "abuse@example.com",
- "anti_ddos": false,
- "bmc": {
- "session_key": null
- },
- "boot_mode": "normal",
- "contacts": {
- "owner": "foobar",
- "tech": "foobar"
- },
+ [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
"disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
],
- "drive_arrays": [
- {
- "disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
- ],
- "raid_controller": {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- },
- "raid_level": "RAID1"
- }
- ],
- "hardware_watch": true,
- "hostname": "sd-42",
- "id": 42,
- "ip": [
- {
- "address": "195.154.172.149",
- "mac": "28:92:4a:33:5e:c6",
- "reverse": "195-154-172-149.rev.poneytelecom.eu.",
- "switch_port_state": "up",
- "type": "public"
- },
- {
- "address": "10.90.53.212",
- "mac": "28:92:4a:33:5e:c7",
- "reverse": null,
- "switch_port_state": "up",
- "type": "private"
- }
- ],
- "last_reboot": "2018-08-23T08:32:03.000Z",
- "location": {
- "block": "A",
- "datacenter": "DC3",
- "position": 19,
- "rack": "A23",
- "room": "4 4-4"
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
},
- "network": {
- "ip": [
- "195.154.172.149"
- ],
- "ipfo": [],
- "private": [
- "10.90.53.212"
- ]
- },
- "offer": "Pro-1-S-SATA",
- "os": {
- "name": "FreeBSD",
- "version": "11.1-RELEASE"
- },
- "power": "ON",
- "proactive_monitoring": false,
- "raid_controllers": [
- {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- }
- ],
- "support": "Basic service level"
- }
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
]
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.online import (
diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py
index 1d91418caf..5b1628adad 100644
--- a/plugins/modules/online_user_info.py
+++ b/plugins/modules/online_user_info.py
@@ -7,7 +7,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: online_user_info
short_description: Gather information about Online user
description:
@@ -18,34 +18,34 @@ extends_documentation_fragment:
- community.general.online
- community.general.attributes
- community.general.attributes.info_module
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Gather Online user info
community.general.online_user_info:
register: result
- ansible.builtin.debug:
msg: "{{ result.online_user_info }}"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
online_user_info:
description:
- Response from Online API.
- - "For more details please refer to: U(https://console.online.net/en/api/)."
+ - 'For more details please refer to: U(https://console.online.net/en/api/).'
returned: success
type: dict
sample:
- "online_user_info": {
- "company": "foobar LLC",
- "email": "foobar@example.com",
- "first_name": "foo",
- "id": 42,
- "last_name": "bar",
- "login": "foobar"
+ {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
}
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.online import (
diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py
index df8a694a7e..80360833a2 100644
--- a/plugins/modules/open_iscsi.py
+++ b/plugins/modules/open_iscsi.py
@@ -8,103 +8,100 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: open_iscsi
author:
- - Serge van Ginderachter (@srvg)
+ - Serge van Ginderachter (@srvg)
short_description: Manage iSCSI targets with Open-iSCSI
description:
- - Discover targets on given portal, (dis)connect targets, mark targets to
- manually or auto start, return device nodes of connected targets.
+ - Discover targets on a given portal, (dis)connect targets, mark targets to start manually or automatically, return device nodes of
+ connected targets.
requirements:
- - open_iscsi library and tools (iscsiadm)
+ - open_iscsi library and tools (iscsiadm)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- portal:
- description:
- - The domain name or IP address of the iSCSI target.
- type: str
- aliases: [ ip ]
- port:
- description:
- - The port on which the iSCSI target process listens.
- type: str
- default: '3260'
- target:
- description:
- - The iSCSI target name.
- type: str
- aliases: [ name, targetname ]
- login:
- description:
- - Whether the target node should be connected.
- - When O(target) is omitted, will login to all available.
- type: bool
- aliases: [ state ]
- node_auth:
- description:
- - The value for C(node.session.auth.authmethod).
- type: str
- default: CHAP
- node_user:
- description:
- - The value for C(node.session.auth.username).
- type: str
- node_pass:
- description:
- - The value for C(node.session.auth.password).
- type: str
- node_user_in:
- description:
- - The value for C(node.session.auth.username_in).
- type: str
- version_added: 3.8.0
- node_pass_in:
- description:
- - The value for C(node.session.auth.password_in).
- type: str
- version_added: 3.8.0
- auto_node_startup:
- description:
- - Whether the target node should be automatically connected at startup.
- type: bool
- aliases: [ automatic ]
- auto_portal_startup:
- description:
- - Whether the target node portal should be automatically connected at startup.
- type: bool
- version_added: 3.2.0
- discover:
- description:
- - Whether the list of target nodes on the portal should be
- (re)discovered and added to the persistent iSCSI database.
- - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
- to manual, hence combined with O(auto_node_startup=true) will always return
- a changed state.
- type: bool
- default: false
- show_nodes:
- description:
- - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
- type: bool
- default: false
- rescan:
- description:
- - Rescan an established session for discovering new targets.
- - When O(target) is omitted, will rescan all sessions.
- type: bool
- default: false
- version_added: 4.1.0
-'''
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ip]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: '3260'
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [name, targetname]
+ login:
+ description:
+ - Whether the target node should be connected.
+ - When O(target) is omitted, it logs in to all available targets.
+ type: bool
+ aliases: [state]
+ node_auth:
+ description:
+ - The value for C(node.session.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(node.session.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(node.session.auth.password).
+ type: str
+ node_user_in:
+ description:
+ - The value for C(node.session.auth.username_in).
+ type: str
+ version_added: 3.8.0
+ node_pass_in:
+ description:
+ - The value for C(node.session.auth.password_in).
+ type: str
+ version_added: 3.8.0
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [automatic]
+ auto_portal_startup:
+ description:
+ - Whether the target node portal should be automatically connected at startup.
+ type: bool
+ version_added: 3.2.0
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database.
+ - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with
+ O(auto_node_startup=true) always returns a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+ rescan:
+ description:
+ - Rescan an established session for discovering new targets.
+ - When O(target) is omitted, it rescans all sessions.
+ type: bool
+ default: false
+ version_added: 4.1.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Perform a discovery on sun.com and show available target nodes
community.general.open_iscsi:
show_nodes: true
@@ -144,7 +141,7 @@ EXAMPLES = r'''
community.general.open_iscsi:
rescan: true
target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
-'''
+"""
import glob
import os
diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py
index 69ac7bff8e..e81fce3018 100644
--- a/plugins/modules/openbsd_pkg.py
+++ b/plugins/modules/openbsd_pkg.py
@@ -10,8 +10,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: openbsd_pkg
author:
- Patrik Lundin (@eest)
@@ -21,69 +20,64 @@ description:
extends_documentation_fragment:
- community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: partial
- version_added: 9.1.0
- details:
- - Only works when check mode is not enabled.
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ version_added: 9.1.0
+ details:
+ - Only works when check mode is not enabled.
options:
- name:
- description:
- - A name or a list of names of the packages.
- required: true
- type: list
- elements: str
- state:
- description:
- - V(present) will make sure the package is installed.
- - V(latest) will make sure the latest version of the package is installed.
- - V(absent) will make sure the specified package is not installed.
- choices: [ absent, latest, present, installed, removed ]
- default: present
- type: str
- build:
- description:
- - Build the package from source instead of downloading and installing
- a binary. Requires that the port source tree is already installed.
- Automatically builds and installs the 'sqlports' package, if it is
- not already installed.
- - Mutually exclusive with O(snapshot).
- type: bool
- default: false
- snapshot:
- description:
- - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
- - Mutually exclusive with O(build).
- type: bool
- default: false
- version_added: 1.3.0
- ports_dir:
- description:
- - When used in combination with the O(build) option, allows overriding
- the default ports source directory.
- default: /usr/ports
- type: path
- clean:
- description:
- - When updating or removing packages, delete the extra configuration
- file(s) in the old packages which are annotated with @extra in
- the packaging-list.
- type: bool
- default: false
- quick:
- description:
- - Replace or delete packages quickly; do not bother with checksums
- before removing normal files.
- type: bool
- default: false
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - V(present) ensures the package is installed.
+ - V(latest) ensures the latest version of the package is installed.
+ - V(absent) ensures the specified package is not installed.
+ choices: [absent, latest, present, installed, removed]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is
+ already installed. Automatically builds and installs the C(sqlports) package, if it is not already installed.
+ - Mutually exclusive with O(snapshot).
+ type: bool
+ default: false
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with O(build).
+ type: bool
+ default: false
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the O(build) option, allows overriding the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated
+ with C(@extra) in the packaging-list.
+ type: bool
+ default: false
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums before removing normal files.
+ type: bool
+ default: false
notes:
- - When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the O(name) option.
-'''
+ - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
+ to the O(name) option.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Make sure nmap is installed
community.general.openbsd_pkg:
name: nmap
@@ -125,7 +119,7 @@ EXAMPLES = '''
name: '*'
state: latest
-- name: Purge a package and it's configuration files
+- name: Purge a package and its configuration files
community.general.openbsd_pkg:
name: mpd
clean: true
@@ -136,7 +130,7 @@ EXAMPLES = '''
name: qt5
quick: true
state: absent
-'''
+"""
import os
import platform
diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py
index fed53532d9..be4edac125 100644
--- a/plugins/modules/opendj_backendprop.py
+++ b/plugins/modules/opendj_backendprop.py
@@ -8,94 +8,93 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: opendj_backendprop
-short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command
+short_description: Update the backend configuration of OpenDJ using the dsconfig set-backend-prop command
description:
- - This module will update settings for OpenDJ with the command set-backend-prop.
- - It will check first via de get-backend-prop if configuration needs to be applied.
+ - This module updates settings for OpenDJ with the command C(set-backend-prop).
+ - It checks first using C(get-backend-prop) if configuration needs to be applied.
author:
- - Werner Dijkerman (@dj-wasabi)
+ - Werner Dijkerman (@dj-wasabi)
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- opendj_bindir:
- description:
- - The path to the bin directory of OpenDJ.
- required: false
- default: /opt/opendj/bin
- type: path
- hostname:
- description:
- - The hostname of the OpenDJ server.
- required: true
- type: str
- port:
- description:
- - The Admin port on which the OpenDJ instance is available.
- required: true
- type: str
- username:
- description:
- - The username to connect to.
- required: false
- default: cn=Directory Manager
- type: str
- password:
- description:
- - The password for the cn=Directory Manager user.
- - Either password or passwordfile is needed.
- required: false
- type: str
- passwordfile:
- description:
- - Location to the password file which holds the password for the cn=Directory Manager user.
- - Either password or passwordfile is needed.
- required: false
- type: path
- backend:
- description:
- - The name of the backend on which the property needs to be updated.
- required: true
- type: str
- name:
- description:
- - The configuration setting to update.
- required: true
- type: str
- value:
- description:
- - The value for the configuration item.
- required: true
- type: str
- state:
- description:
- - If configuration needs to be added/updated
- required: false
- default: "present"
- type: str
-'''
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ type: path
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ type: str
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ type: str
+ username:
+ description:
+ - The username to connect to.
+ required: false
+ default: cn=Directory Manager
+ type: str
+ password:
+ description:
+ - The password for the C(cn=Directory Manager) user.
+ - Either password or passwordfile is needed.
+ required: false
+ type: str
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the C(cn=Directory Manager) user.
+ - Either password or passwordfile is needed.
+ required: false
+ type: path
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ type: str
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ type: str
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the configuration needs to be added or updated.
+ required: false
+ default: "present"
+ type: str
+"""
-EXAMPLES = '''
- - name: Add or update OpenDJ backend properties
- action: opendj_backendprop
- hostname=localhost
- port=4444
- username="cn=Directory Manager"
- password=password
- backend=userRoot
- name=index-entry-limit
- value=5000
-'''
+EXAMPLES = r"""
+- name: Add or update OpenDJ backend properties
+ opendj_backendprop:
+ hostname: localhost
+ port: 4444
+ username: "cn=Directory Manager"
+ password: password
+ backend: userRoot
+ name: index-entry-limit
+ value: 5000
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
from ansible.module_utils.basic import AnsibleModule
@@ -154,9 +153,9 @@ def main():
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
- username=dict(default="cn=Directory Manager", required=False),
- password=dict(required=False, no_log=True),
- passwordfile=dict(required=False, type="path"),
+ username=dict(default="cn=Directory Manager"),
+ password=dict(no_log=True),
+ passwordfile=dict(type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py
index 46fdea5e27..c8c98f2d39 100644
--- a/plugins/modules/openwrt_init.py
+++ b/plugins/modules/openwrt_init.py
@@ -8,52 +8,50 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: openwrt_init
author:
- - "Andrew Gaffney (@agaffney)"
+ - "Andrew Gaffney (@agaffney)"
short_description: Manage services on OpenWrt
description:
- - Controls OpenWrt services on remote hosts.
+ - Controls OpenWrt services on remote hosts.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- type: str
- description:
- - Name of the service.
- required: true
- aliases: ['service']
- state:
- type: str
- description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - V(restarted) will always bounce the service.
- - V(reloaded) will always reload.
- choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
- enabled:
- description:
- - Whether the service should start on boot. B(At least one of state and enabled are required.)
- type: bool
- pattern:
- type: str
- description:
- - If the service does not respond to the 'running' command, name a
- substring to look for as would be found in the output of the C(ps)
- command as a stand-in for a 'running' result. If the string is found,
- the service will be assumed to be running.
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service.
+ - V(reloaded) always reloads.
+ choices: ['started', 'stopped', 'restarted', 'reloaded']
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one) of O(state) and O(enabled) is required.
+ type: bool
+ pattern:
+ type: str
+ description:
+ - If the service does not respond to the C(running) command, name a substring to look for as would be found in the output
+ of the C(ps) command as a stand-in for a C(running) result. If the string is found, the service is assumed to be running.
notes:
- - One option other than name is required.
+ - One option other than O(name) is required.
requirements:
- - An OpenWrt system (with python)
-'''
+ - An OpenWrt system (with python)
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Start service httpd, if not running
community.general.openwrt_init:
state: started
@@ -73,10 +71,10 @@ EXAMPLES = '''
community.general.openwrt_init:
name: httpd
enabled: true
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py
index 2f9794ab86..b57fbd7df7 100644
--- a/plugins/modules/opkg.py
+++ b/plugins/modules/opkg.py
@@ -11,71 +11,69 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: opkg
author: "Patrick Pelletier (@skinp)"
short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions
description:
- - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions
+ - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of package(s) to install/remove.
- - C(NAME=VERSION) syntax is also supported to install a package
- in a certain version. See the examples. This only works on Yocto based
- Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is
- supported since community.general 6.2.0.
- aliases: [pkg]
- required: true
- type: list
- elements: str
- state:
- description:
- - State of the package.
- choices: [ 'present', 'absent', 'installed', 'removed' ]
- default: present
- type: str
- force:
- description:
- - The C(opkg --force) parameter used.
- - Passing V("") as value and not passing any value at all have both
- the same effect of B(not) using any C(--force-) parameter.
- choices:
- - ""
- - "depends"
- - "maintainer"
- - "reinstall"
- - "overwrite"
- - "downgrade"
- - "space"
- - "postinstall"
- - "remove"
- - "checksum"
- - "removal-of-dependent-packages"
- type: str
- update_cache:
- description:
- - Update the package DB first.
- default: false
- type: bool
- executable:
- description:
- - The executable location for C(opkg).
- type: path
- version_added: 7.2.0
+ name:
+ description:
+ - Name of package(s) to install/remove.
+ - V(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works
+ on Yocto based Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0.
+ aliases: [pkg]
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ choices: ['present', 'absent', 'installed', 'removed']
+ default: present
+ type: str
+ force:
+ description:
+ - The C(opkg --force) parameter used.
+ - State V("") is deprecated and will be removed in community.general 12.0.0. Please omit the parameter O(force) to obtain
+ the same behavior.
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ type: str
+ update_cache:
+ description:
+ - Update the package DB first.
+ default: false
+ type: bool
+ executable:
+ description:
+ - The executable location for C(opkg).
+ type: path
+ version_added: 7.2.0
requirements:
- - opkg
- - python
-'''
-EXAMPLES = '''
+ - opkg
+ - python
+"""
+
+EXAMPLES = r"""
- name: Install foo
community.general.opkg:
name: foo
@@ -109,7 +107,16 @@ EXAMPLES = '''
name: foo
state: present
force: overwrite
-'''
+"""
+
+RETURN = r"""
+version:
+ description: Version of opkg.
+ type: str
+ returned: always
+ sample: "2.80.0"
+ version_added: 10.0.0
+"""
import os
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
@@ -127,7 +134,6 @@ class Opkg(StateModuleHelper):
executable=dict(type="path"),
),
)
- use_old_vardict = False
def __init_module__(self):
self.vars.set("install_c", 0, output=False, change=True)
@@ -142,7 +148,11 @@ class Opkg(StateModuleHelper):
)
def _force(value):
+ # 12.0.0 function _force() to be removed entirely
if value == "":
+ self.deprecate('Value "" is deprecated. Simply omit the parameter "force" to prevent any --force-X argument when running opkg',
+ version="12.0.0",
+ collection_name="community.general")
value = None
return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True)
@@ -154,12 +164,17 @@ class Opkg(StateModuleHelper):
arg_formats=dict(
package=cmd_runner_fmt.as_list(),
state=cmd_runner_fmt.as_map(state_map),
- force=cmd_runner_fmt.as_func(_force),
+ force=cmd_runner_fmt.as_func(_force), # 12.0.0 replace with cmd_runner_fmt.as_optval("--force-")
update_cache=cmd_runner_fmt.as_bool("update"),
+ version=cmd_runner_fmt.as_fixed("--version"),
),
path_prefix=dir,
)
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip().replace("opkg version ", "")
+
if self.vars.update_cache:
rc, dummy, dummy = self.runner("update_cache").run()
if rc != 0:
@@ -186,13 +201,12 @@ class Opkg(StateModuleHelper):
pkg_name, pkg_version = self.split_name_and_version(package)
if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall":
ctx.run(package=package)
+ self.vars.set("run_info", ctx.run_info, verbosity=4)
if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version):
self.do_raise("failed to install %s" % package)
self.vars.install_c += 1
- if self.verbosity >= 4:
- self.vars.run_info = ctx.run_info
if self.vars.install_c > 0:
- self.vars.msg = "installed %s package(s)" % (self.vars.install_c)
+ self.vars.msg = "installed %s package(s)" % self.vars.install_c
else:
self.vars.msg = "package(s) already present"
@@ -202,13 +216,12 @@ class Opkg(StateModuleHelper):
package, dummy = self.split_name_and_version(package)
if not self._package_in_desired_state(package, want_installed=False):
ctx.run(package=package)
+ self.vars.set("run_info", ctx.run_info, verbosity=4)
if not self._package_in_desired_state(package, want_installed=False):
self.do_raise("failed to remove %s" % package)
self.vars.remove_c += 1
- if self.verbosity >= 4:
- self.vars.run_info = ctx.run_info
if self.vars.remove_c > 0:
- self.vars.msg = "removed %s package(s)" % (self.vars.remove_c)
+ self.vars.msg = "removed %s package(s)" % self.vars.remove_c
else:
self.vars.msg = "package(s) already absent"
diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py
index db5d889a37..56ff6e1ac1 100644
--- a/plugins/modules/osx_defaults.py
+++ b/plugins/modules/osx_defaults.py
@@ -10,18 +10,17 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: osx_defaults
author:
# DO NOT RE-ADD GITHUB HANDLE!
-- Franck Nijhof (!UNKNOWN)
+ - Franck Nijhof (!UNKNOWN)
short_description: Manage macOS user defaults
description:
- - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
- - macOS applications and other programs use the defaults system to record user preferences and other
- information that must be maintained when the applications are not running (such as default font for new
- documents, or the position of an Info panel).
+ - This module allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - MacOS applications and other programs use the defaults system to record user preferences and other information that must
+ be maintained when the applications are not running (such as default font for new documents, or the position of an Info
+ panel).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -48,7 +47,7 @@ options:
description:
- The type of value to write.
type: str
- choices: [ array, bool, boolean, date, float, int, integer, string ]
+ choices: [array, bool, boolean, date, float, int, integer, string]
default: string
check_type:
description:
@@ -70,9 +69,9 @@ options:
state:
description:
- The state of the user defaults.
- - If set to V(list) will query the given parameter specified by O(key). Returns V(null) is nothing found or mis-spelled.
+ - If set to V(list) it queries the given parameter specified by O(key). Returns V(null) if nothing found or misspelled.
type: str
- choices: [ absent, list, present ]
+ choices: [absent, list, present]
default: present
path:
description:
@@ -80,10 +79,10 @@ options:
type: str
default: /usr/bin:/usr/local/bin
notes:
- - Apple Mac caches defaults. You may need to logout and login to apply the changes.
-'''
+ - Apple Mac caches defaults. You may need to logout and login to apply the changes.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Set boolean valued key for application domain
community.general.osx_defaults:
domain: com.apple.Safari
@@ -135,7 +134,7 @@ EXAMPLES = r'''
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
-'''
+"""
from datetime import datetime
import re
@@ -192,7 +191,7 @@ class OSXDefaults(object):
@staticmethod
def is_int(value):
as_str = str(value)
- if (as_str.startswith("-")):
+ if as_str.startswith("-"):
return as_str[1:].isdigit()
else:
return as_str.isdigit()
diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py
index 58d340e3e9..425ee614f5 100644
--- a/plugins/modules/ovh_ip_failover.py
+++ b/plugins/modules/ovh_ip_failover.py
@@ -9,88 +9,80 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ovh_ip_failover
short_description: Manage OVH IP failover address
description:
- - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move
- an ip failover (or failover block) between services
+ - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP
+ failover (or failover block) between services.
author: "Pascal HERAUD (@pascalheraud)"
notes:
- - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consumer
- key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+ - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with
+ a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/).
requirements:
- - ovh >= 0.4.8
+ - ovh >= 0.4.8
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- required: true
- description:
- - The IP address to manage (can be a single IP like 1.1.1.1
- or a block like 1.1.1.1/28 )
- type: str
- service:
- required: true
- description:
- - The name of the OVH service this IP address should be routed
- type: str
- endpoint:
- required: true
- description:
- - The endpoint to use ( for instance ovh-eu)
- type: str
- wait_completion:
- required: false
- default: true
- type: bool
- description:
- - If true, the module will wait for the IP address to be moved.
- If false, exit without waiting. The taskId will be returned
- in module output
- wait_task_completion:
- required: false
- default: 0
- description:
- - If not 0, the module will wait for this task id to be
- completed. Use wait_task_completion if you want to wait for
- completion of a previously executed task with
- wait_completion=false. You can execute this module repeatedly on
- a list of failover IPs using wait_completion=false (see examples)
- type: int
- application_key:
- required: true
- description:
- - The applicationKey to use
- type: str
- application_secret:
- required: true
- description:
- - The application secret to use
- type: str
- consumer_key:
- required: true
- description:
- - The consumer key to use
- type: str
- timeout:
- required: false
- default: 120
- description:
- - The timeout in seconds used to wait for a task to be
- completed. Default is 120 seconds.
- type: int
+ name:
+ required: true
+ description:
+ - The IP address to manage (can be a single IP like V(1.1.1.1) or a block like V(1.1.1.1/28)).
+ type: str
+ service:
+ required: true
+ description:
+ - The name of the OVH service this IP address should be routed to.
+ type: str
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance V(ovh-eu)).
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+ - If V(true), the module waits for the IP address to be moved. If V(false), it exits without waiting. The C(taskId) is returned
+ in module output.
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not V(0), the module waits for this task ID to be completed. Use O(wait_task_completion) if you want to wait for
+ completion of a previously executed task with O(wait_completion=false). You can execute this module repeatedly on
+ a list of failover IPs using O(wait_completion=false) (see examples).
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use.
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use.
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use.
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be completed. Default is 120 seconds.
+ type: int
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Route an IP address 1.1.1.1 to the service ns666.ovh.net
- community.general.ovh_ip_failover:
name: 1.1.1.1
@@ -116,10 +108,10 @@ EXAMPLES = '''
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
import time
@@ -167,7 +159,7 @@ def waitForTaskDone(client, name, taskId, timeout):
task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
if task['status'] == 'done':
return True
- time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
+ time.sleep(5) # Delay for 5 sec to not harass the API
currentTimeout -= 5
if currentTimeout < 0:
return False
diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py
index f70b5804a7..8bf294a1d5 100644
--- a/plugins/modules/ovh_ip_loadbalancing_backend.py
+++ b/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -9,85 +9,80 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ovh_ip_loadbalancing_backend
short_description: Manage OVH IP LoadBalancing backends
description:
- - Manage OVH (French European hosting provider) LoadBalancing IP backends
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends.
author: Pascal Heraud (@pascalheraud)
notes:
- - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consumer
- key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+ - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with
+ a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/).
requirements:
- - ovh > 0.3.5
+ - ovh > 0.3.5
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- required: true
- description:
- - Name of the LoadBalancing internal name (ip-X.X.X.X)
- type: str
- backend:
- required: true
- description:
- - The IP address of the backend to update / modify / delete
- type: str
- state:
- default: present
- choices: ['present', 'absent']
- description:
- - Determines whether the backend is to be created/modified
- or deleted
- type: str
- probe:
- default: 'none'
- choices: ['none', 'http', 'icmp' , 'oco']
- description:
- - Determines the type of probe to use for this backend
- type: str
- weight:
- default: 8
- description:
- - Determines the weight for this backend
- type: int
- endpoint:
- required: true
- description:
- - The endpoint to use ( for instance ovh-eu)
- type: str
- application_key:
- required: true
- description:
- - The applicationKey to use
- type: str
- application_secret:
- required: true
- description:
- - The application secret to use
- type: str
- consumer_key:
- required: true
- description:
- - The consumer key to use
- type: str
- timeout:
- default: 120
- description:
- - The timeout in seconds used to wait for a task to be
- completed.
- type: int
+ name:
+ required: true
+ description:
+ - Name of the LoadBalancing internal name (V(ip-X.X.X.X)).
+ type: str
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete.
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified or deleted.
+ type: str
+ probe:
+ default: 'none'
+ choices: ['none', 'http', 'icmp', 'oco']
+ description:
+ - Determines the type of probe to use for this backend.
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend.
+ type: int
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance V(ovh-eu)).
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use.
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use.
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use.
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be completed.
+ type: int
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1'
ovh_ip_loadbalancing:
name: ip-1.1.1.1
@@ -109,10 +104,10 @@ EXAMPLES = '''
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
import time
@@ -249,7 +244,7 @@ def main():
'parameters. Error returned by OVH api was : {0}'
.format(apiError))
- if (backendProperties['weight'] != weight):
+ if backendProperties['weight'] != weight:
# Change weight
try:
client.post(
@@ -268,7 +263,7 @@ def main():
.format(apiError))
moduleChanged = True
- if (backendProperties['probe'] != probe):
+ if backendProperties['probe'] != probe:
# Change probe
backendProperties['probe'] = probe
try:
diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py
index c2f503e3ad..912b697517 100644
--- a/plugins/modules/ovh_monthly_billing.py
+++ b/plugins/modules/ovh_monthly_billing.py
@@ -9,52 +9,51 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: ovh_monthly_billing
author: Francois Lallart (@fraff)
version_added: '0.2.0'
short_description: Manage OVH monthly billing
description:
- - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it).
-requirements: [ "ovh" ]
+ - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it).
+requirements: ["ovh"]
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- project_id:
- required: true
- type: str
- description:
- - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
- instance_id:
- required: true
- type: str
- description:
- - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
- endpoint:
- type: str
- description:
- - The endpoint to use (for instance ovh-eu)
- application_key:
- type: str
- description:
- - The applicationKey to use
- application_secret:
- type: str
- description:
- - The application secret to use
- consumer_key:
- type: str
- description:
- - The consumer key to use
-'''
+ project_id:
+ required: true
+ type: str
+ description:
+ - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET).
+ instance_id:
+ required: true
+ type: str
+ description:
+ - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET).
+ endpoint:
+ type: str
+ description:
+ - The endpoint to use (for instance V(ovh-eu)).
+ application_key:
+ type: str
+ description:
+ - The applicationKey to use.
+ application_secret:
+ type: str
+ description:
+ - The application secret to use.
+ consumer_key:
+ type: str
+ description:
+ - The consumer key to use.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Basic usage, using auth from /etc/ovh.conf
community.general.ovh_monthly_billing:
project_id: 0c727a20aa144485b70c44dee9123b46
@@ -75,10 +74,10 @@ EXAMPLES = '''
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
-'''
+"""
-RETURN = '''
-'''
+RETURN = r"""
+"""
import traceback
@@ -99,10 +98,10 @@ def main():
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
- endpoint=dict(required=False),
- application_key=dict(required=False, no_log=True),
- application_secret=dict(required=False, no_log=True),
- consumer_key=dict(required=False, no_log=True),
+ endpoint=dict(),
+ application_key=dict(no_log=True),
+ application_secret=dict(no_log=True),
+ consumer_key=dict(no_log=True),
),
supports_check_mode=True
)
diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py
index af8bb5ff56..ffed13f9c5 100644
--- a/plugins/modules/pacemaker_cluster.py
+++ b/plugins/modules/pacemaker_cluster.py
@@ -13,6 +13,7 @@ module: pacemaker_cluster
short_description: Manage pacemaker clusters
author:
- Mathieu Bultel (@matbu)
+ - Dexter Le (@munchtoast)
description:
- This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI.
extends_documentation_fragment:
@@ -26,17 +27,20 @@ options:
state:
description:
- Indicate desired state of the cluster.
- choices: [cleanup, offline, online, restart]
+ - The value V(maintenance) has been added in community.general 11.1.0.
+ choices: [cleanup, offline, online, restart, maintenance]
type: str
- node:
+ name:
description:
- - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status of all nodes.
+ - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status
+ of all nodes.
type: str
+ aliases: ['node']
timeout:
description:
- - Timeout when the module should considered that the action has failed.
- default: 300
+ - Timeout period (in seconds) for polling the cluster operation.
type: int
+ default: 300
force:
description:
- Force the change of the cluster state.
@@ -62,164 +66,104 @@ out:
returned: always
"""
-import time
-
-from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode
-_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+class PacemakerCluster(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', choices=[
+ 'cleanup', 'offline', 'online', 'restart', 'maintenance']),
+ name=dict(type='str', aliases=['node']),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True)
+ ),
+ supports_check_mode=True,
+ )
+ default_state = ""
+ def __init_module__(self):
+ self.runner = pacemaker_runner(self.module)
+ self.vars.set('apply_all', True if not self.module.params['name'] else False)
+ get_args = dict([('cli_action', 'cluster'), ('state', 'status'), ('name', None), ('apply_all', self.vars.apply_all)])
+ if self.module.params['state'] == "maintenance":
+ get_args['cli_action'] = "property"
+ get_args['state'] = "config"
+ get_args['name'] = "maintenance-mode"
+ elif self.module.params['state'] == "cleanup":
+ get_args['cli_action'] = "resource"
+ get_args['name'] = self.module.params['name']
-def get_cluster_status(module):
- cmd = "pcs cluster status"
- rc, out, err = module.run_command(cmd)
- if out in _PCS_CLUSTER_DOWN:
- return 'offline'
- else:
- return 'online'
+ self.vars.set('get_args', get_args)
+ self.vars.set('previous_value', self._get()['out'])
+ self.vars.set('value', self.vars.previous_value, change=True, diff=True)
+ if not self.module.params['state']:
+ self.module.deprecate(
+ 'Parameter "state" values not set is being deprecated. Make sure to provide a value for "state"',
+ version='12.0.0',
+ collection_name='community.general'
+ )
-def get_node_status(module, node='all'):
- if node == 'all':
- cmd = "pcs cluster pcsd-status %s" % node
- else:
- cmd = "pcs cluster pcsd-status"
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
- status = []
- for o in out.splitlines():
- status.append(o.split(':'))
- return status
+ def __quit_module__(self):
+ self.vars.set('value', self._get()['out'])
+ def _process_command_output(self, fail_on_err, ignore_err_msg=""):
+ def process(rc, out, err):
+ if fail_on_err and rc != 0 and err and ignore_err_msg not in err:
+ self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err))
+ out = out.rstrip()
+ return None if out == "" else out
+ return process
-def clean_cluster(module, timeout):
- cmd = "pcs resource cleanup"
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ def _get(self):
+ with self.runner('cli_action state name') as ctx:
+ result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name'])
+ return dict([('rc', result[0]),
+ ('out', result[1] if result[1] != "" else None),
+ ('err', result[2])])
+ def state_cleanup(self):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
-def set_cluster(module, state, timeout, force):
- if state == 'online':
- cmd = "pcs cluster start"
- if state == 'offline':
- cmd = "pcs cluster stop"
- if force:
- cmd = "%s --force" % cmd
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ def state_offline(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "not currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
- t = time.time()
- ready = False
- while time.time() < t + timeout:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- ready = True
- break
- if not ready:
- module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+ def state_online(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+ if get_pacemaker_maintenance_mode(self.runner):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
-def set_node(module, state, timeout, force, node='all'):
- # map states
- if state == 'online':
- cmd = "pcs cluster start"
- if state == 'offline':
- cmd = "pcs cluster stop"
- if force:
- cmd = "%s --force" % cmd
+ def state_maintenance(self):
+ with self.runner('cli_action state name',
+ output_process=self._process_command_output(True, "Fail"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', name='maintenance-mode=true')
- nodes_state = get_node_status(module, node)
- for node in nodes_state:
- if node[1].strip().lower() != state:
- cmd = "%s %s" % (cmd, node[0].strip())
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ def state_restart(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "not currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+ ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
- t = time.time()
- ready = False
- while time.time() < t + timeout:
- nodes_state = get_node_status(module)
- for node in nodes_state:
- if node[1].strip().lower() == state:
- ready = True
- break
- if not ready:
- module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+ if get_pacemaker_maintenance_mode(self.runner):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
def main():
- argument_spec = dict(
- state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
- node=dict(type='str'),
- timeout=dict(type='int', default=300),
- force=dict(type='bool', default=True),
- )
-
- module = AnsibleModule(
- argument_spec,
- supports_check_mode=True,
- )
- changed = False
- state = module.params['state']
- node = module.params['node']
- force = module.params['force']
- timeout = module.params['timeout']
-
- if state in ['online', 'offline']:
- # Get cluster status
- if node is None:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- if module.check_mode:
- module.exit_json(changed=True)
- set_cluster(module, state, timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Fail to bring the cluster %s" % state)
- else:
- cluster_state = get_node_status(module, node)
- # Check cluster state
- for node_state in cluster_state:
- if node_state[1].strip().lower() == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- if module.check_mode:
- module.exit_json(changed=True)
- # Set cluster status if needed
- set_cluster(module, state, timeout, force)
- cluster_state = get_node_status(module, node)
- module.exit_json(changed=True, out=cluster_state)
-
- if state in ['restart']:
- if module.check_mode:
- module.exit_json(changed=True)
- set_cluster(module, 'offline', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'offline':
- set_cluster(module, 'online', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'online':
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
-
- if state in ['cleanup']:
- if module.check_mode:
- module.exit_json(changed=True)
- clean_cluster(module, timeout)
- cluster_state = get_cluster_status(module)
- module.exit_json(changed=True,
- out=cluster_state)
+ PacemakerCluster.execute()
if __name__ == '__main__':
diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py
new file mode 100644
index 0000000000..2fdf785487
--- /dev/null
+++ b/plugins/modules/pacemaker_resource.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Dexter Le
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: pacemaker_resource
+short_description: Manage pacemaker resources
+author:
+ - Dexter Le (@munchtoast)
+version_added: 10.5.0
+description:
+ - This module can manage resources in a Pacemaker cluster using the pacemaker CLI.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicate desired state for cluster resource.
+ choices: [present, absent, enabled, disabled]
+ default: present
+ type: str
+ name:
+ description:
+ - Specify the resource name to create.
+ required: true
+ type: str
+ resource_type:
+ description:
+ - Resource type to create.
+ type: dict
+ suboptions:
+ resource_name:
+ description:
+ - Specify the resource type name.
+ type: str
+ resource_standard:
+ description:
+ - Specify the resource type standard.
+ type: str
+ resource_provider:
+ description:
+ - Specify the resource type providers.
+ type: str
+ resource_option:
+ description:
+ - Specify the resource option to create.
+ type: list
+ elements: str
+ default: []
+ resource_operation:
+ description:
+ - List of operations to associate with resource.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ operation_action:
+ description:
+ - Operation action to associate with resource.
+ type: str
+ operation_option:
+ description:
+ - Operation option to associate with action.
+ type: list
+ elements: str
+ resource_meta:
+ description:
+ - List of meta to associate with resource.
+ type: list
+ elements: str
+ resource_argument:
+ description:
+ - Action to associate with resource.
+ type: dict
+ suboptions:
+ argument_action:
+ description:
+ - Action to apply to resource.
+ type: str
+ choices: [clone, master, group, promotable]
+ argument_option:
+ description:
+ - Options to associate with resource action.
+ type: list
+ elements: str
+ wait:
+ description:
+ - Timeout period for polling the resource creation.
+ type: int
+ default: 300
+"""
+
+EXAMPLES = r"""
+---
+- name: Create pacemaker resource
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Create virtual-ip resource
+ community.general.pacemaker_resource:
+ state: present
+ name: virtual-ip
+ resource_type:
+ resource_name: IPaddr2
+ resource_option:
+ - "ip=[192.168.2.1]"
+ resource_argument:
+ argument_action: group
+ argument_option:
+ - master
+ resource_operation:
+ - operation_action: monitor
+ operation_option:
+ - interval=20
+"""
+
+RETURN = r"""
+cluster_resources:
+ description: The cluster resource output message.
+ type: str
+ sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)"
+ returned: always
+"""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode
+
+
+class PacemakerResource(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=[
+ 'present', 'absent', 'enabled', 'disabled']),
+ name=dict(type='str', required=True),
+ resource_type=dict(type='dict', options=dict(
+ resource_name=dict(type='str'),
+ resource_standard=dict(type='str'),
+ resource_provider=dict(type='str'),
+ )),
+ resource_option=dict(type='list', elements='str', default=list()),
+ resource_operation=dict(type='list', elements='dict', default=list(), options=dict(
+ operation_action=dict(type='str'),
+ operation_option=dict(type='list', elements='str'),
+ )),
+ resource_meta=dict(type='list', elements='str'),
+ resource_argument=dict(type='dict', options=dict(
+ argument_action=dict(type='str', choices=['clone', 'master', 'group', 'promotable']),
+ argument_option=dict(type='list', elements='str'),
+ )),
+ wait=dict(type='int', default=300),
+ ),
+ required_if=[('state', 'present', ['resource_type', 'resource_option'])],
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = pacemaker_runner(self.module)
+ self.vars.set('previous_value', self._get()['out'])
+ self.vars.set('value', self.vars.previous_value, change=True, diff=True)
+ self.module.params['name'] = self.module.params['name'] or None
+
+ def __quit_module__(self):
+ self.vars.set('value', self._get()['out'])
+
+ def _process_command_output(self, fail_on_err, ignore_err_msg=""):
+ def process(rc, out, err):
+ if fail_on_err and rc != 0 and err and ignore_err_msg not in err:
+ self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err))
+ out = out.rstrip()
+ return None if out == "" else out
+ return process
+
+ def _get(self):
+ with self.runner('cli_action state name') as ctx:
+ result = ctx.run(cli_action="resource", state='status')
+ return dict([('rc', result[0]),
+ ('out', result[1] if result[1] != "" else None),
+ ('err', result[2])])
+
+ def state_absent(self):
+ force = get_pacemaker_maintenance_mode(self.runner)
+ with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource', force=force)
+
+ def state_present(self):
+ with self.runner(
+ 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument wait',
+ output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
+
+ def state_enabled(self):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
+
+ def state_disabled(self):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
+
+
+def main():
+ PacemakerResource.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py
index 13dbbb9ff3..f17db56c8c 100644
--- a/plugins/modules/packet_device.py
+++ b/plugins/modules/packet_device.py
@@ -111,8 +111,10 @@ options:
state:
description:
- Desired state of the device.
- - If set to V(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
- - If set to V(active), the module call will block until all the specified devices are in state active due to the Packet API, or until O(wait_timeout).
+ - If set to V(present) (the default), the module call returns immediately after the device-creating HTTP request successfully
+ returns.
+ - If set to V(active), the module call blocks until all the specified devices are in state active due to the Packet
+ API, or until O(wait_timeout).
choices: [present, absent, active, inactive, rebooted]
default: present
type: str
@@ -125,15 +127,16 @@ options:
wait_for_public_IPv:
description:
- Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
- - If set to 4, it will wait until IPv4 is assigned to the instance.
- - If set to 6, wait until public IPv6 is assigned to the instance.
+ - If set to V(4), it waits until IPv4 is assigned to the instance.
+ - If set to V(6), it waits until public IPv6 is assigned to the instance.
choices: [4, 6]
type: int
wait_timeout:
description:
- How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state.
- - If O(wait_for_public_IPv) is set and O(state=active), the module will wait for both events consequently, applying the timeout twice.
+ - If O(wait_for_public_IPv) is set and O(state=active), the module waits for both events consequently, applying the
+ timeout twice.
default: 900
type: int
@@ -173,7 +176,7 @@ EXAMPLES = r"""
plan: baremetal_0
facility: sjc1
-# Create the same device and wait until it is in state "active", (when it's
+# Create the same device and wait until it is in state "active", (when it is
# ready for other API operations). Fail if the device is not "active" in
# 10 minutes.
@@ -255,26 +258,18 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a device was altered in any way (created, modified or removed).
- type: bool
- sample: true
- returned: success
-
devices:
- description: Information about each device that was processed
+ description: Information about each device that was processed.
type: list
sample:
- - {
- "hostname": "my-server.com",
- "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
- "public_ipv4": "147.229.15.12",
- "private-ipv4": "10.0.15.12",
- "tags": [],
- "locked": false,
- "state": "provisioning",
- "public_ipv6": "2604:1380:2:5200::3"
- }
+ - "hostname": "my-server.com"
+ "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7"
+ "public_ipv4": "147.229.15.12"
+ "private-ipv4": "10.0.15.12"
+ "tags": []
+ "locked": false
+ "state": "provisioning"
+ "public_ipv6": "2604:1380:2:5200::3"
returned: success
"""
@@ -419,12 +414,12 @@ def get_hostname_list(module):
# at this point, hostnames is a list
hostnames = [h.strip() for h in hostnames]
- if (len(hostnames) > 1) and (count > 1):
+ if len(hostnames) > 1 and count > 1:
_msg = ("If you set count>1, you should only specify one hostname "
"with the %d formatter, not a list of hostnames.")
raise Exception(_msg)
- if (len(hostnames) == 1) and (count > 0):
+ if len(hostnames) == 1 and count > 0:
hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count)
if re.search(r"%\d{0,2}d", hostname_spec):
diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py
index c2c9fcead4..0029623a10 100644
--- a/plugins/modules/packet_ip_subnet.py
+++ b/plugins/modules/packet_ip_subnet.py
@@ -75,10 +75,11 @@ options:
state:
description:
- Desired state of the IP subnet on the specified device.
- - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified
- device.
- - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified devices.
- - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it's assigned to.
+ - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR is then assigned
+ to the specified device.
+ - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet is then removed from specified
+ devices.
+ - If you leave both O(hostname) and O(device_id) empty, the subnet is then removed from any device it is assigned to.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -122,12 +123,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if an IP address assignments were altered in any way (created or removed).
- type: bool
- sample: true
- returned: success
-
device_id:
type: str
description: UUID of the device associated with the specified IP address.
diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py
index f6acdec152..afadec36be 100644
--- a/plugins/modules/packet_project.py
+++ b/plugins/modules/packet_project.py
@@ -49,7 +49,7 @@ options:
auth_token:
description:
- - Packet api token. You can also supply it in environment variable E(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
name:
@@ -110,12 +110,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a project was created or removed.
- type: bool
- sample: true
- returned: success
-
name:
description: Name of addressed project.
type: str
diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py
index 8172482108..ec76a17b4c 100644
--- a/plugins/modules/packet_sshkey.py
+++ b/plugins/modules/packet_sshkey.py
@@ -35,7 +35,7 @@ options:
type: str
label:
description:
- - Label for the key. If you keep it empty, it will be read from key string.
+ - Label for the key. If you keep it empty, it is read from key string.
type: str
aliases: [name]
id:
@@ -85,15 +85,11 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a sshkey was created or removed.
- type: bool
- sample: true
- returned: always
sshkeys:
- description: Information about sshkeys that were created/removed.
- type: list
- sample: [
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample:
+ [
{
"fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
"id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
@@ -101,7 +97,7 @@ sshkeys:
"label": "mynewkey33"
}
]
- returned: always
+ returned: always
"""
import os
diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py
index 0423cc879d..7537c1c3fe 100644
--- a/plugins/modules/packet_volume_attachment.py
+++ b/plugins/modules/packet_volume_attachment.py
@@ -18,8 +18,8 @@ short_description: Attach/detach a volume to a device in the Packet host
description:
- Attach/detach a volume to a device in the Packet host.
- API is documented at U(https://www.packet.com/developers/api/volumes/).
- - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you have to run the Attach
- Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux).
+ - This module creates the attachment route in the Packet API. In order to discover the block devices on the server, you
+ have to run the Attach Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux).
version_added: '0.2.0'
author:
diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py
index a4a9370ae0..359cbc51d1 100644
--- a/plugins/modules/pacman.py
+++ b/plugins/modules/pacman.py
@@ -32,7 +32,8 @@ attributes:
options:
name:
description:
- - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with O(upgrade).
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with
+ O(upgrade).
aliases: [package, pkg]
type: list
elements: str
@@ -40,9 +41,9 @@ options:
state:
description:
- Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package.
- - V(present) and V(installed) will simply ensure that a desired package is installed.
- - V(latest) will update the specified package if it is not of the latest available version.
- - V(absent) and V(removed) will remove the specified package.
+ - V(present) and V(installed) simply ensure that a desired package is installed.
+ - V(latest) updates the specified package if it is not of the latest available version.
+ - V(absent) and V(removed) remove the specified package.
default: present
choices: [absent, installed, latest, present, removed]
type: str
@@ -50,7 +51,8 @@ options:
force:
description:
- When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps").
- - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh --refresh").
+ - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh
+ --refresh").
default: false
type: bool
@@ -64,8 +66,8 @@ options:
executable:
description:
- Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
- - Pacman compatibility is unfortunately ill defined, in particular, this modules makes extensive use of the C(--print-format) directive
- which is known not to be implemented by some AUR helpers (notably, C(yay)).
+ - Pacman compatibility is unfortunately ill defined; in particular, this module makes extensive use of the C(--print-format)
+ directive which is known not to be implemented by some AUR helpers (notably, C(yay)).
- Beware that AUR helpers might behave unexpectedly and are therefore not recommended.
default: pacman
type: str
@@ -82,8 +84,8 @@ options:
- Whether or not to refresh the master package lists.
- This can be run as part of a package installation or as a separate step.
- If not specified, it defaults to V(false).
- - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are not specified before
- community.general 5.0.0. See the examples for how to keep the old behavior.
+ - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are
+ not specified before community.general 5.0.0. See the examples for how to keep the old behavior.
type: bool
update_cache_extra_args:
@@ -114,31 +116,33 @@ options:
reason_for:
description:
- Set the install reason for V(all) packages or only for V(new) packages.
+ - In case of O(state=latest), already installed packages which are updated to a newer version are not counted as V(new).
+ - In case of O(state=latest) already installed packages which are updated to a newer version are not counted as V(new).
default: new
choices: [all, new]
type: str
version_added: 5.4.0
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name)
- option.
- - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated build user with
- permissions to install packages could be necessary.
- - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: C(error: target not
- found: ).'
+ - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly
+ to the O(name) option.
+ - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated
+ build user with permissions to install packages could be necessary.
+ - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error:
+ C(error: target not found: ).'
+ - The common return values RV(stdout) and RV(stderr) are returned upon success, when needed, since community.general 4.1.0.
"""
RETURN = r"""
packages:
description:
- A list of packages that have been changed.
- - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes omitted when the
- package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified or O(upgrade=true).
+ - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes
+ omitted when the package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified
+ or O(upgrade=true).
returned: success and O(name) is specified or O(upgrade=true)
type: list
elements: str
- sample: [package, other-package]
+ sample: ["package", "other-package"]
cache_updated:
description:
@@ -148,22 +152,6 @@ cache_updated:
type: bool
sample: false
version_added: 4.6.0
-
-stdout:
- description:
- - Output from pacman.
- returned: success, when needed
- type: str
- sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
- version_added: 4.1.0
-
-stderr:
- description:
- - Error output from pacman.
- returned: success, when needed
- type: str
- sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
- version_added: 4.1.0
"""
EXAMPLES = r"""
diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py
index f98fb6f8a3..851655f9fc 100644
--- a/plugins/modules/pacman_key.py
+++ b/plugins/modules/pacman_key.py
@@ -18,9 +18,9 @@ description:
- Add or remove gpg keys from the pacman keyring.
notes:
- Use full-length key ID (40 characters).
- - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
- - Keys will be locally signed after being imported into the keyring.
- - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified.
+ - Keys are verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
+ - Keys are locally signed after being imported into the keyring.
+ - If the key ID exists in the keyring, the key is not added unless O(force_update) is specified.
- O(data), O(file), O(url), and O(keyserver) are mutually exclusive.
requirements:
- gpg
@@ -72,16 +72,22 @@ options:
keyring:
description:
- The full path to the keyring folder on the remote server.
- - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)).
+ - If not specified, module uses pacman's default (V(/etc/pacman.d/gnupg)).
- Useful if the remote system requires an alternative gnupg directory.
type: path
default: /etc/pacman.d/gnupg
state:
description:
- - Ensures that the key is present (added) or absent (revoked).
+ - Ensures that the key is V(present) (added) or V(absent) (revoked).
default: present
choices: [absent, present]
type: str
+ ensure_trusted:
+ description:
+ - Ensure that the key is trusted (signed by the Pacman machine key and not expired).
+ type: bool
+ default: false
+ version_added: 11.0.0
"""
EXAMPLES = r"""
@@ -129,12 +135,55 @@ from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.text.converters import to_native
+class GpgListResult(object):
+ """Wraps gpg --list-* output."""
+
+ def __init__(self, line):
+ self._parts = line.split(':')
+
+ @property
+ def kind(self):
+ return self._parts[0]
+
+ @property
+ def valid(self):
+ return self._parts[1]
+
+ @property
+ def is_fully_valid(self):
+ return self.valid == 'f'
+
+ @property
+ def key(self):
+ return self._parts[4]
+
+ @property
+ def user_id(self):
+ return self._parts[9]
+
+
+def gpg_get_first_attr_of_kind(lines, kind, attr):
+ for line in lines:
+ glr = GpgListResult(line)
+ if glr.kind == kind:
+ return getattr(glr, attr)
+
+
+def gpg_get_all_attrs_of_kind(lines, kind, attr):
+ result = []
+ for line in lines:
+ glr = GpgListResult(line)
+ if glr.kind == kind:
+ result.append(getattr(glr, attr))
+ return result
+
+
class PacmanKey(object):
def __init__(self, module):
self.module = module
# obtain binary paths for gpg & pacman-key
- self.gpg = module.get_bin_path('gpg', required=True)
- self.pacman_key = module.get_bin_path('pacman-key', required=True)
+ self.gpg_binary = module.get_bin_path('gpg', required=True)
+ self.pacman_key_binary = module.get_bin_path('pacman-key', required=True)
# obtain module parameters
keyid = module.params['id']
@@ -146,47 +195,71 @@ class PacmanKey(object):
force_update = module.params['force_update']
keyring = module.params['keyring']
state = module.params['state']
+ ensure_trusted = module.params['ensure_trusted']
self.keylength = 40
# sanitise key ID & check if key exists in the keyring
keyid = self.sanitise_keyid(keyid)
- key_present = self.key_in_keyring(keyring, keyid)
+ key_validity = self.key_validity(keyring, keyid)
+ key_present = len(key_validity) > 0
+ key_valid = any(key_validity)
# check mode
if module.check_mode:
- if state == "present":
+ if state == 'present':
changed = (key_present and force_update) or not key_present
+ if not changed and ensure_trusted:
+ changed = not (key_valid and self.key_is_trusted(keyring, keyid))
module.exit_json(changed=changed)
- elif state == "absent":
- if key_present:
- module.exit_json(changed=True)
- module.exit_json(changed=False)
+ if state == 'absent':
+ module.exit_json(changed=key_present)
- if state == "present":
- if key_present and not force_update:
+ if state == 'present':
+ trusted = key_valid and self.key_is_trusted(keyring, keyid)
+ if not force_update and key_present and (not ensure_trusted or trusted):
module.exit_json(changed=False)
-
+ changed = False
if data:
file = self.save_key(data)
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif file:
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif url:
data = self.fetch_key(url)
file = self.save_key(data)
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif keyserver:
self.recv_key(keyring, keyid, keyserver)
- module.exit_json(changed=True)
- elif state == "absent":
+ changed = True
+ if changed or (ensure_trusted and not trusted):
+ self.lsign_key(keyring=keyring, keyid=keyid)
+ changed = True
+ module.exit_json(changed=changed)
+ elif state == 'absent':
if key_present:
self.remove_key(keyring, keyid)
module.exit_json(changed=True)
module.exit_json(changed=False)
+ def gpg(self, args, keyring=None, **kwargs):
+ cmd = [self.gpg_binary]
+ if keyring:
+ cmd.append('--homedir={keyring}'.format(keyring=keyring))
+ cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty'])
+ return self.module.run_command(cmd + args, **kwargs)
+
+ def pacman_key(self, args, keyring, **kwargs):
+ return self.module.run_command(
+ [self.pacman_key_binary, '--gpgdir', keyring] + args,
+ **kwargs)
+
+ def pacman_machine_key(self, keyring):
+ unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring)
+ return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key')
+
def is_hexadecimal(self, string):
"""Check if a given string is valid hexadecimal"""
try:
@@ -216,14 +289,11 @@ class PacmanKey(object):
def recv_key(self, keyring, keyid, keyserver):
"""Receives key via keyserver"""
- cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid]
- self.module.run_command(cmd, check_rc=True)
- self.lsign_key(keyring, keyid)
+ self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True)
def lsign_key(self, keyring, keyid):
"""Locally sign key"""
- cmd = [self.pacman_key, '--gpgdir', keyring]
- self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True)
+ self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True)
def save_key(self, data):
"Saves key data to a temporary file"
@@ -238,14 +308,11 @@ class PacmanKey(object):
"""Add key to pacman's keyring"""
if verify:
self.verify_keyfile(keyfile, keyid)
- cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile]
- self.module.run_command(cmd, check_rc=True)
- self.lsign_key(keyring, keyid)
+ self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True)
def remove_key(self, keyring, keyid):
"""Remove key from pacman's keyring"""
- cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid]
- self.module.run_command(cmd, check_rc=True)
+ self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True)
def verify_keyfile(self, keyfile, keyid):
"""Verify that keyfile matches the specified key ID"""
@@ -254,48 +321,29 @@ class PacmanKey(object):
elif keyid is None:
self.module.fail_json(msg="expected a key ID, got none")
- rc, stdout, stderr = self.module.run_command(
- [
- self.gpg,
- '--with-colons',
- '--with-fingerprint',
- '--batch',
- '--no-tty',
- '--show-keys',
- keyfile
- ],
+ rc, stdout, stderr = self.gpg(
+ ['--with-fingerprint', '--show-keys', keyfile],
check_rc=True,
)
- extracted_keyid = None
- for line in stdout.splitlines():
- if line.startswith('fpr:'):
- extracted_keyid = line.split(':')[9]
- break
-
+ extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 'user_id')
if extracted_keyid != keyid:
self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid))
- def key_in_keyring(self, keyring, keyid):
- "Check if the key ID is in pacman's keyring"
- rc, stdout, stderr = self.module.run_command(
- [
- self.gpg,
- '--with-colons',
- '--batch',
- '--no-tty',
- '--no-default-keyring',
- '--keyring=%s/pubring.gpg' % keyring,
- '--list-keys', keyid
- ],
- check_rc=False,
- )
+ def key_validity(self, keyring, keyid):
+ "Check if the key ID is in pacman's keyring and not expired"
+ rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False)
if rc != 0:
if stderr.find("No public key") >= 0:
- return False
+ return []
else:
self.module.fail_json(msg="gpg returned an error: %s" % stderr)
- return True
+ return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid')
+
+ def key_is_trusted(self, keyring, keyid):
+ """Check if key is signed and not expired."""
+ unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring)
+ return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key')
def main():
@@ -309,6 +357,7 @@ def main():
verify=dict(type='bool', default=True),
force_update=dict(type='bool', default=False),
keyring=dict(type='path', default='/etc/pacman.d/gnupg'),
+ ensure_trusted=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py
index 8d83374c34..78443e8410 100644
--- a/plugins/modules/pagerduty.py
+++ b/plugins/modules/pagerduty.py
@@ -13,7 +13,7 @@ DOCUMENTATION = r"""
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- - This module will let you create PagerDuty maintenance windows.
+ - This module lets you create PagerDuty maintenance windows.
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
@@ -79,7 +79,8 @@ options:
default: Created by Ansible
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
"""
@@ -202,7 +203,7 @@ class PagerDutyRequest(object):
return False, json_out, True
def _create_services_payload(self, service):
- if (isinstance(service, list)):
+ if isinstance(service, list):
return [{'id': s, 'type': 'service_reference'} for s in service]
else:
return [{'id': service, 'type': 'service_reference'}]
@@ -241,15 +242,15 @@ def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
- name=dict(required=False),
- user=dict(required=False),
+ name=dict(),
+ user=dict(),
token=dict(required=True, no_log=True),
- service=dict(required=False, type='list', elements='str', aliases=["services"]),
- window_id=dict(required=False),
- requester_id=dict(required=False),
- hours=dict(default='1', required=False), # @TODO change to int?
- minutes=dict(default='0', required=False), # @TODO change to int?
- desc=dict(default='Created by Ansible', required=False),
+ service=dict(type='list', elements='str', aliases=["services"]),
+ window_id=dict(),
+ requester_id=dict(),
+ hours=dict(default='1'), # @TODO change to int?
+ minutes=dict(default='0'), # @TODO change to int?
+ desc=dict(default='Created by Ansible'),
validate_certs=dict(default=True, type='bool'),
)
)
diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py
index 050dcd17e9..e3d93e8718 100644
--- a/plugins/modules/pagerduty_alert.py
+++ b/plugins/modules/pagerduty_alert.py
@@ -12,7 +12,7 @@ DOCUMENTATION = r"""
module: pagerduty_alert
short_description: Trigger, acknowledge or resolve PagerDuty incidents
description:
- - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events.
+ - This module lets you trigger, acknowledge or resolve a PagerDuty incident by sending events.
author:
- "Amanpreet Singh (@ApsOps)"
- "Xiao Shen (@xshen1)"
@@ -43,7 +43,7 @@ options:
service_id:
type: str
description:
- - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved.
+ - ID of PagerDuty service when incidents are triggered, acknowledged or resolved.
- Required if O(api_version=v1).
service_key:
type: str
@@ -73,68 +73,58 @@ options:
type: str
description:
- The name of the monitoring client that is triggering this event.
- required: false
client_url:
type: str
description:
- The URL of the monitoring client that is triggering this event.
- required: false
component:
type: str
description:
- Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0).
- required: false
version_added: 7.4.0
custom_details:
type: dict
description:
- Additional details about the event and affected system.
- A dictionary with custom keys and values.
- required: false
version_added: 7.4.0
desc:
type: str
description:
- - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will
- be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The
- maximum length is 1024 characters.
- - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event.
- required: false
+ - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated
+ version) is used when generating phone calls, SMS messages and alert emails. It also appears on the incidents tables
+ in the PagerDuty UI. The maximum length is 1024 characters.
+ - For O(state=acknowledged) or O(state=resolved) - Text that appears in the incident's log associated with this event.
default: Created via Ansible
incident_class:
type: str
description:
- The class/type of the event, for example C(ping failure) or C(cpu load).
- required: false
version_added: 7.4.0
incident_key:
type: str
description:
- Identifies the incident to which this O(state) should be applied.
- - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one will be created. If there is already an
- open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup'
- problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty.
- - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened
- by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
- required: false
+ - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one is created.
+ If there is already an open incident with a matching key, this event is appended to that incident's log. The event
+ key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it is generated by PagerDuty.
+ - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident
+ was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents are discarded.
link_url:
type: str
description:
- Relevant link URL to the alert. For example, the website or the job link.
- required: false
version_added: 7.4.0
link_text:
type: str
description:
- A short description of the O(link_url).
- required: false
version_added: 7.4.0
source:
type: str
description:
- The unique location of the affected system, preferably a hostname or FQDN.
- Required in case of O(state=trigger) and O(api_version=v2).
- required: false
version_added: 7.4.0
severity:
type: str
@@ -330,25 +320,25 @@ def send_event_v2(module, service_key, event_type, payload, link,
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(required=False),
- api_key=dict(required=False, no_log=True),
- integration_key=dict(required=False, no_log=True),
- service_id=dict(required=False),
- service_key=dict(required=False, no_log=True),
+ name=dict(),
+ api_key=dict(no_log=True),
+ integration_key=dict(no_log=True),
+ service_id=dict(),
+ service_key=dict(no_log=True),
state=dict(
required=True, choices=['triggered', 'acknowledged', 'resolved']
),
api_version=dict(type='str', default='v1', choices=['v1', 'v2']),
- client=dict(required=False),
- client_url=dict(required=False),
- component=dict(required=False),
- custom_details=dict(required=False, type='dict'),
- desc=dict(required=False, default='Created via Ansible'),
- incident_class=dict(required=False),
- incident_key=dict(required=False, no_log=False),
- link_url=dict(required=False),
- link_text=dict(required=False),
- source=dict(required=False),
+ client=dict(),
+ client_url=dict(),
+ component=dict(),
+ custom_details=dict(type='dict'),
+ desc=dict(default='Created via Ansible'),
+ incident_class=dict(),
+ incident_key=dict(no_log=False),
+ link_url=dict(),
+ link_text=dict(),
+ source=dict(),
severity=dict(
default='critical', choices=['critical', 'warning', 'error', 'info']
),
diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py
index 39353f7575..de77016969 100644
--- a/plugins/modules/pagerduty_change.py
+++ b/plugins/modules/pagerduty_change.py
@@ -13,8 +13,8 @@ module: pagerduty_change
short_description: Track a code or infrastructure change as a PagerDuty change event
version_added: 1.3.0
description:
- - This module will let you create a PagerDuty change event each time the module is run.
- - This is not an idempotent action and a new change event will be created each time it is run.
+ - This module lets you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event is created each time it is run.
author:
- Adam Vaughan (@adamvaughan)
requirements:
@@ -31,7 +31,8 @@ attributes:
options:
integration_key:
description:
- - The integration key that identifies the service the change was made to. This can be found by adding an integration to a service in PagerDuty.
+ - The integration key that identifies the service the change was made to. This can be found by adding an integration
+ to a service in PagerDuty.
required: true
type: str
summary:
@@ -81,8 +82,8 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
+ sites using self-signed certificates.
required: false
default: true
type: bool
@@ -120,15 +121,14 @@ def main():
argument_spec=dict(
integration_key=dict(required=True, type='str', no_log=True),
summary=dict(required=True, type='str'),
- source=dict(required=False, default='Ansible', type='str'),
- user=dict(required=False, type='str'),
- repo=dict(required=False, type='str'),
- revision=dict(required=False, type='str'),
- environment=dict(required=False, type='str'),
- link_url=dict(required=False, type='str'),
- link_text=dict(required=False, type='str'),
- url=dict(required=False,
- default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ source=dict(default='Ansible', type='str'),
+ user=dict(type='str'),
+ repo=dict(type='str'),
+ revision=dict(type='str'),
+ environment=dict(type='str'),
+ link_url=dict(type='str'),
+ link_text=dict(type='str'),
+ url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py
index e03342c792..0830af97f3 100644
--- a/plugins/modules/pagerduty_user.py
+++ b/plugins/modules/pagerduty_user.py
@@ -188,7 +188,7 @@ def main():
state=dict(type='str', default='present', choices=['present', 'absent']),
pd_role=dict(type='str', default='responder',
choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
- pd_teams=dict(type='list', elements='str', required=False)),
+ pd_teams=dict(type='list', elements='str')),
required_if=[['state', 'present', ['pd_teams']], ],
supports_check_mode=True,
)
diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py
index 516b61fec1..536ba59662 100644
--- a/plugins/modules/pam_limits.py
+++ b/plugins/modules/pam_limits.py
@@ -73,21 +73,24 @@ options:
required: true
backup:
description:
- - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
+ it incorrectly.
required: false
type: bool
default: false
use_min:
description:
- - If set to V(true), the minimal value will be used or conserved.
- - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content is not modified.
+ - If set to V(true), the minimal value is used or conserved.
+ - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content
+ is not modified.
required: false
type: bool
default: false
use_max:
description:
- - If set to V(true), the maximal value will be used or conserved.
- - If the specified value is superior to the value in the file, file content is replaced with the new value, else content is not modified.
+ - If set to V(true), the maximal value is used or conserved.
+ - If the specified value is superior to the value in the file, file content is replaced with the new value, else content
+ is not modified.
required: false
type: bool
default: false
@@ -180,7 +183,7 @@ def main():
use_min=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
dest=dict(default=limits_conf, type='str'),
- comment=dict(required=False, default='', type='str')
+ comment=dict(default='', type='str')
),
supports_check_mode=True,
)
diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py
index 6502922bc1..327316aa37 100644
--- a/plugins/modules/pamd.py
+++ b/plugins/modules/pamd.py
@@ -16,7 +16,8 @@ author:
short_description: Manage PAM Modules
description:
- Edit PAM service's type, control, module path and module arguments.
- - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d for details.
+ - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d
+ for details.
notes:
- This module does not handle authselect profiles.
extends_documentation_fragment:
@@ -67,20 +68,20 @@ options:
type: str
module_arguments:
description:
- - When O(state=updated), the O(module_arguments) will replace existing module_arguments.
- - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed.
+ - When O(state=updated), the O(module_arguments) replace existing module_arguments.
+ - When O(state=args_absent) args matching those listed in O(module_arguments) are removed.
- When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule.
- - Furthermore, if the module argument takes a value denoted by C(=), the value will be changed to that specified in module_arguments.
+ - Furthermore, if the module argument takes a value denoted by C(=), the value changes to that specified in module_arguments.
type: list
elements: str
state:
description:
- - The default of V(updated) will modify an existing rule if type, control and module_path all match an existing rule.
- - With V(before), the new rule will be inserted before a rule matching type, control and module_path.
- - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, control and module_path.
+ - The default of V(updated) modifies an existing rule if type, control and module_path all match an existing rule.
+ - With V(before), the new rule is inserted before a rule matching type, control and module_path.
+ - Similarly, with V(after), the new rule is inserted after an existing rule matching type, control and module_path.
- With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified.
- - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored.
- - State V(absent) will remove the rule.
+ - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) are ignored.
+ - State V(absent) removes the rule.
type: str
choices: [absent, before, after, args_absent, args_present, updated]
default: updated
@@ -91,7 +92,8 @@ options:
default: /etc/pam.d
backup:
description:
- - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
+ it incorrectly.
type: bool
default: false
"""
@@ -151,11 +153,7 @@ EXAMPLES = r"""
type: auth
control: required
module_path: pam_faillock.so
- module_arguments: 'preauth
- silent
- deny=3
- unlock_time=604800
- fail_interval=900'
+ module_arguments: 'preauth silent deny=3 unlock_time=604800 fail_interval=900'
state: updated
- name: Remove specific arguments from a rule
diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py
index 43c34ff9e5..4bf0897afc 100644
--- a/plugins/modules/parted.py
+++ b/plugins/modules/parted.py
@@ -15,8 +15,8 @@ author:
module: parted
short_description: Configure block device partitions
description:
- - This module allows configuring block device partition using the C(parted) command line tool. For a full description of the fields and the
- options check the GNU parted manual.
+ - This module allows configuring block device partition using the C(parted) command line tool. For a full description of
+ the fields and the options check the GNU parted manual.
requirements:
- This module requires C(parted) version 1.8.3 and above.
- Option O(align) (except V(undefined)) requires C(parted) 2.1 or above.
@@ -33,7 +33,8 @@ options:
device:
description:
- The block device (disk) where to operate.
- - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily access its partitions.
+ - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily
+ access its partitions.
type: str
required: true
align:
@@ -49,8 +50,8 @@ options:
type: int
unit:
description:
- - Selects the current default unit that Parted will use to display locations and capacities on the disk and to interpret those given by
- the user if they are not suffixed by an unit.
+ - Selects the current default unit that Parted uses to display locations and capacities on the disk and to interpret
+ those given by the user if they are not suffixed by a unit.
- When fetching information about a disk, it is recommended to always specify a unit.
type: str
choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact]
@@ -58,7 +59,7 @@ options:
label:
description:
- Disk label type or partition table to use.
- - If O(device) already contains a different label, it will be changed to O(label) and any previous partitions will be lost.
+ - If O(device) already contains a different label, it is changed to O(label) and any previous partitions are lost.
- A O(name) must be specified for a V(gpt) partition table.
type: str
choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun]
@@ -72,17 +73,19 @@ options:
default: primary
part_start:
description:
- - Where the partition will start as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative
- numbers specify distance from the end of the disk.
- - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%).
+ - Where the partition starts as offset from the beginning of the disk, that is, the "distance" from the start of the
+ disk. Negative numbers specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for
+ example V(10GiB), V(15%).
- Using negative values may require setting of O(fs_type) (see notes).
type: str
default: 0%
part_end:
description:
- - Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the disk. Negative numbers
- specify distance from the end of the disk.
- - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for example V(10GiB), V(15%).
+ - Where the partition ends as offset from the beginning of the disk, that is, the "distance" from the start of the disk.
+ Negative numbers specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for
+ example V(10GiB), V(15%).
type: str
default: 100%
name:
@@ -96,13 +99,13 @@ options:
state:
description:
- Whether to create or delete a partition.
- - If set to V(info) the module will only return the device information.
+ - If set to V(info) the module only returns the device information.
type: str
choices: [absent, present, info]
default: info
fs_type:
description:
- - If specified and the partition does not exist, will set filesystem type to given partition.
+ - If specified and the partition does not exist, sets filesystem type to given partition.
- Parameter optional, but see notes below about negative O(part_start) values.
type: str
version_added: '0.2.0'
@@ -114,10 +117,11 @@ options:
version_added: '1.3.0'
notes:
- - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, the module queries
- the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not supported.
- - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If you want to use negative
- O(part_start), specify O(fs_type) as well or make sure your system contains newer parted.
+ - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1,
+ the module queries the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not
+ supported.
+ - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If
+ you want to use negative O(part_start), specify O(fs_type) as well or make sure your system contains newer parted.
"""
RETURN = r"""
@@ -135,35 +139,31 @@ partition_info:
script:
description: Parted script executed by module.
type: str
- sample: {
- "disk": {
- "dev": "/dev/sdb",
- "logical_block": 512,
- "model": "VMware Virtual disk",
- "physical_block": 512,
- "size": 5.0,
- "table": "msdos",
- "unit": "gib"
- },
- "partitions": [{
- "begin": 0.0,
- "end": 1.0,
- "flags": ["boot", "lvm"],
- "fstype": "",
- "name": "",
- "num": 1,
+ sample:
+ "disk":
+ "dev": "/dev/sdb"
+ "logical_block": 512
+ "model": "VMware Virtual disk"
+ "physical_block": 512
+ "size": 5.0
+ "table": "msdos"
+ "unit": "gib"
+ "partitions":
+ - "begin": 0.0
+ "end": 1.0
+ "flags": ["boot", "lvm"]
+ "fstype": ""
+ "name": ""
+ "num": 1
"size": 1.0
- }, {
- "begin": 1.0,
- "end": 5.0,
- "flags": [],
- "fstype": "",
- "name": "",
- "num": 2,
+ - "begin": 1.0
+ "end": 5.0
+ "flags": []
+ "fstype": ""
+ "name": ""
+ "num": 2
"size": 4.0
- }],
- "script": "unit KiB print "
- }
+ "script": "unit KiB print "
"""
EXAMPLES = r"""
@@ -583,11 +583,8 @@ def read_record(file_path, default=None):
Reads the first line of a file and returns it.
"""
try:
- f = open(file_path, 'r')
- try:
+ with open(file_path, 'r') as f:
return f.readline().strip()
- finally:
- f.close()
except IOError:
return default
diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py
index ba8f5f9ca2..5eb84b509d 100644
--- a/plugins/modules/pear.py
+++ b/plugins/modules/pear.py
@@ -45,14 +45,16 @@ options:
- Path to the pear executable.
prompts:
description:
- - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
- - Prompts will be processed in the same order as the packages list.
+ - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected
+ question.
+ - Prompts are processed in the same order as the packages list.
- You can optionally specify an answer to any question in the list.
- - If no answer is provided, the list item will only contain the regular expression.
- - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+ - If no answer is provided, the list item must contain only the regular expression.
+ - "To specify an answer, the item must be a dictionary with the regular expression as key and the answer as value C(my_regular_expression:
+ 'an_answer')."
- You can provide a list containing items with or without answer.
- - A prompt list can be shorter or longer than the packages list but will issue a warning.
- - If you want to specify that a package will not need prompts in the middle of a list, V(null).
+ - A prompt list can be shorter or longer than the packages list but it issues a warning.
+ - If you want to specify that a package does not need prompts in the middle of a list, V(null).
type: list
elements: raw
version_added: 0.2.0
@@ -83,9 +85,9 @@ EXAMPLES = r"""
prompts:
- (.*)Enable internal debugging in APCu \[no\]: "yes"
-- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages order. If there
- is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages than prompts,
- additional prompts will be ignored.
+- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages
+ order. If there are more prompts than packages, packages without prompts will be installed without any prompt expected.
+ If there are more packages than prompts, additional prompts will be ignored.
community.general.pear:
name: pecl/gnupg, pecl/apcu
state: present
@@ -93,9 +95,9 @@ EXAMPLES = r"""
- I am a test prompt because gnupg doesnt asks anything
- (.*)Enable internal debugging in APCu \[no\]: "yes"
-- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order as the packages order.
- If there is more prompts than packages, packages without prompts will be installed without any prompt expected. If there is more packages
- than prompts, additional prompts will be ignored.
+- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order
+ as the packages order. If there are more prompts than packages, packages without prompts will be installed without any
+ prompt expected. If there are more packages than prompts, additional prompts will be ignored.
community.general.pear:
name: pecl/gnupg, pecl/apcu
state: present
@@ -224,7 +226,7 @@ def install_packages(module, state, packages, prompts):
# Preparing prompts answer according to item type
tmp_prompts = []
for _item in prompts:
- # If the current item is a dict then we expect it's key to be the prompt regex and it's value to be the answer
+ # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer
# We also expect here that the dict only has ONE key and the first key will be taken
if isinstance(_item, dict):
key = list(_item.keys())[0]
@@ -291,8 +293,8 @@ def main():
argument_spec=dict(
name=dict(aliases=['pkg'], required=True),
state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
- executable=dict(default=None, required=False, type='path'),
- prompts=dict(default=None, required=False, type='list', elements='raw'),
+ executable=dict(type='path'),
+ prompts=dict(type='list', elements='raw'),
),
supports_check_mode=True)
diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py
index 99b52ef1dd..2db5dbfa23 100644
--- a/plugins/modules/pids.py
+++ b/plugins/modules/pids.py
@@ -7,9 +7,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: pids
-description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines. Returns an empty list
+ if no process with that name exists."
short_description: Retrieves process IDs list if the process is running otherwise return empty list
author:
- Saranya Sridharan (@saranyasridharan)
@@ -35,13 +36,13 @@ options:
type: bool
default: false
version_added: 3.0.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Pass the process name
- name: Getting process IDs of the process
community.general.pids:
- name: python
+ name: python
register: pids_of_python
- name: Printing the process IDs obtained
@@ -52,15 +53,15 @@ EXAMPLES = r'''
community.general.pids:
pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py
register: myapp_pids
-'''
+"""
-RETURN = '''
+RETURN = r"""
pids:
- description: Process IDs of the given process
+ description: Process IDs of the given process.
returned: list of none, one, or more process IDs
type: list
- sample: [100,200]
-'''
+ sample: [100, 200]
+"""
import abc
import re
diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py
index bd4826a780..7c82063ab9 100644
--- a/plugins/modules/pingdom.py
+++ b/plugins/modules/pingdom.py
@@ -8,56 +8,55 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
-
+DOCUMENTATION = r"""
module: pingdom
short_description: Pause/unpause Pingdom alerts
description:
- - This module will let you pause/unpause Pingdom alerts
+ - This module lets you pause/unpause Pingdom alerts.
author:
- - "Dylan Silva (@thaumos)"
- - "Justin Johns (!UNKNOWN)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
requirements:
- - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+ - "This pingdom python library: U(https://github.com/mbabineau/pingdom-python)"
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- state:
- type: str
- description:
- - Define whether or not the check should be running or paused.
- required: true
- choices: [ "running", "paused", "started", "stopped" ]
- checkid:
- type: str
- description:
- - Pingdom ID of the check.
- required: true
- uid:
- type: str
- description:
- - Pingdom user ID.
- required: true
- passwd:
- type: str
- description:
- - Pingdom user password.
- required: true
- key:
- type: str
- description:
- - Pingdom API key.
- required: true
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: ["running", "paused", "started", "stopped"]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
notes:
- - This module does not yet have support to add/remove checks.
-'''
+ - This module does not yet have support to add/remove checks.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Pause the check with the ID of 12345
community.general.pingdom:
uid: example@example.com
@@ -73,7 +72,7 @@ EXAMPLES = '''
key: apipassword123
checkid: 12345
state: running
-'''
+"""
import traceback
@@ -133,10 +132,10 @@ def main():
passwd = module.params['passwd']
key = module.params['key']
- if (state == "paused" or state == "stopped"):
+ if state == "paused" or state == "stopped":
(rc, name, result) = pause(checkid, uid, passwd, key)
- if (state == "running" or state == "started"):
+ if state == "running" or state == "started":
(rc, name, result) = unpause(checkid, uid, passwd, key)
if rc != 0:
diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py
index f7354e3678..80bdedf7fe 100644
--- a/plugins/modules/pip_package_info.py
+++ b/plugins/modules/pip_package_info.py
@@ -9,33 +9,33 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: pip_package_info
short_description: Pip package information
description:
- - Return information about installed pip packages
+ - Return information about installed pip packages.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
options:
clients:
description:
- - A list of the pip executables that will be used to get the packages.
- They can be supplied with the full path or just the executable name, for example V(pip3.7).
+ - A list of the pip executables that are used to get the packages. They can be supplied with the full path or just the
+ executable name, for example V(pip3.7).
default: ['pip']
required: false
type: list
elements: path
requirements:
- pip >= 20.3b1 (necessary for the C(--format) option)
- - The requested pip executables must be installed on the target.
+ - The requested C(pip) executables must be installed on the target.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Just get the list from default pip
community.general.pip_package_info:
@@ -46,52 +46,55 @@ EXAMPLES = '''
- name: Get from specific paths (virtualenvs?)
community.general.pip_package_info:
clients: '/home/me/projec42/python/pip3.5'
-'''
+"""
-RETURN = '''
+RETURN = r"""
packages:
- description: a dictionary of installed package data
+ description: A dictionary of installed package data.
returned: always
type: dict
contains:
python:
- description: A dictionary with each pip client which then contains a list of dicts with python package information
+ description: A dictionary with each pip client which then contains a list of dicts with python package information.
returned: always
type: dict
sample:
- "packages": {
+ {
+ "packages": {
"pip": {
- "Babel": [
- {
- "name": "Babel",
- "source": "pip",
- "version": "2.6.0"
- }
- ],
- "Flask": [
- {
- "name": "Flask",
- "source": "pip",
- "version": "1.0.2"
- }
- ],
- "Flask-SQLAlchemy": [
- {
- "name": "Flask-SQLAlchemy",
- "source": "pip",
- "version": "2.3.2"
- }
- ],
- "Jinja2": [
- {
- "name": "Jinja2",
- "source": "pip",
- "version": "2.10"
- }
- ],
- },
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ]
+ }
+ }
}
-'''
+"""
+
import json
import os
diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py
index c317ae8da8..778810be0c 100644
--- a/plugins/modules/pipx.py
+++ b/plugins/modules/pipx.py
@@ -9,16 +9,15 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pipx
short_description: Manages applications installed with pipx
version_added: 3.8.0
description:
-- Manage Python applications installed in isolated virtualenvs using pipx.
+ - Manage Python applications installed in isolated virtualenvs using pipx.
extends_documentation_fragment:
-- community.general.attributes
-- community.general.pipx
+ - community.general.attributes
+ - community.general.pipx
attributes:
check_mode:
support: full
@@ -28,131 +27,140 @@ options:
state:
type: str
choices:
- - present
- - absent
- - install
- - install_all
- - uninstall
- - uninstall_all
- - inject
- - uninject
- - upgrade
- - upgrade_shared
- - upgrade_all
- - reinstall
- - reinstall_all
- - latest
- - pin
- - unpin
+ - present
+ - absent
+ - install
+ - install_all
+ - uninstall
+ - uninstall_all
+ - inject
+ - uninject
+ - upgrade
+ - upgrade_shared
+ - upgrade_all
+ - reinstall
+ - reinstall_all
+ - latest
+ - pin
+ - unpin
default: install
description:
- - Desired state for the application.
- - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively.
- - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added in community.general
- 5.5.0.
- - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), make sure to have a
- compatible version when using this option. These states have been added in community.general 9.4.0.
+ - Desired state for the application.
+ - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively.
+ - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added
+ in community.general 5.5.0.
+ - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0),
+ make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0.
name:
type: str
description:
- - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment where the application
- will be installed.
- - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name to be installed.
- - Use O(source) for passing package specifications or installing from URLs or directories.
+ - The name of the application and also the name of the Python package being installed.
+ - In C(pipx) documentation it is also referred to as the name of the virtual environment where the application is installed.
+ - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name
+ to be installed.
+ - Starting in community.general 10.7.0, you can use package specifiers when O(state=present) or O(state=install). For
+ example, O(name=tox<4.0.0) or O(name=tox>3.0.27).
+ - Please note that when you use O(state=present) and O(name) with version specifiers, contrary to the behavior of C(pipx),
+ this module honors the version specifier and installs a version of the application that satisfies it. If you want
+ to ensure the reinstallation of the application even when the version specifier is met, then you must use O(force=true),
+ or perhaps use O(state=upgrade) instead.
+ - Use O(source) for installing from URLs or directories.
source:
type: str
description:
- - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other states.
- - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed file.
- - The value of this option is passed as-is to C(pipx).
- - O(name) is still required when using O(source) to establish the application name without fetching the package from a remote source.
+ - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other
+ states.
+ - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed
+ file.
+ - The value of this option is passed as-is to C(pipx).
+ - O(name) is still required when using O(source) to establish the application name without fetching the package from
+ a remote source.
+ - The module is not idempotent when using O(source).
install_apps:
description:
- - Add apps from the injected packages.
- - Only used when O(state=inject).
+ - Add apps from the injected packages.
+ - Only used when O(state=inject).
type: bool
default: false
version_added: 6.5.0
install_deps:
description:
- - Include applications of dependent packages.
- - Only used when O(state=install), O(state=latest), or O(state=inject).
+ - Include applications of dependent packages.
+ - Only used when O(state=install), O(state=latest), or O(state=inject).
type: bool
default: false
inject_packages:
description:
- - Packages to be injected into an existing virtual environment.
- - Only used when O(state=inject).
+ - Packages to be injected into an existing virtual environment.
+ - Only used when O(state=inject).
type: list
elements: str
force:
description:
- - Force modification of the application's virtual environment. See C(pipx) for details.
- - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject).
+ - Force modification of the application's virtual environment. See C(pipx) for details.
+ - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject).
+ - The module is not idempotent when O(force=true).
type: bool
default: false
include_injected:
description:
- - Upgrade the injected packages along with the application.
- - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest).
- - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0.
+ - Upgrade the injected packages along with the application.
+ - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest).
+ - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0.
type: bool
default: false
index_url:
description:
- - Base URL of Python Package Index.
- - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject).
+ - Base URL of Python Package Index.
+ - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject).
type: str
python:
description:
- - Python version to be used when creating the application virtual environment. Must be 3.6+.
- - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all).
+ - Python version to be used when creating the application virtual environment. Must be 3.6+.
+ - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all).
type: str
system_site_packages:
description:
- - Give application virtual environment access to the system site-packages directory.
- - Only used when O(state=install) or O(state=latest).
+ - Give application virtual environment access to the system site-packages directory.
+ - Only used when O(state=install) or O(state=latest).
type: bool
default: false
version_added: 6.6.0
editable:
description:
- - Install the project in editable mode.
+ - Install the project in editable mode.
type: bool
default: false
version_added: 4.6.0
pip_args:
description:
- - Arbitrary arguments to pass directly to C(pip).
+ - Arbitrary arguments to pass directly to C(pip).
type: str
version_added: 4.6.0
suffix:
description:
- - Optional suffix for virtual environment and executable names.
- - "B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change."
+ - Optional suffix for virtual environment and executable names.
+ - B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change.
type: str
version_added: 9.3.0
global:
version_added: 9.4.0
spec_metadata:
description:
- - Spec metadata file for O(state=install_all).
- - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) with
- O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output).
+ - Spec metadata file for O(state=install_all).
+ - The content of this file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info)
+ with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output).
type: path
version_added: 9.4.0
-notes:
-- >
- This first implementation does not verify whether a specified version constraint has been installed or not.
- Hence, when using version operators, C(pipx) module will always try to execute the operation,
- even when the application was previously installed.
- This feature will be added in the future.
+requirements:
+ - When using O(name) with version specifiers, the Python package C(packaging) is required.
+ - If the package C(packaging) is at a version lower than C(22.0.0), it fails silently when processing invalid specifiers,
+ like C(tox<<<<4.0).
author:
-- "Alexei Znamensky (@russoz)"
+ - "Alexei Znamensky (@russoz)"
"""
-EXAMPLES = """
----
+EXAMPLES = r"""
- name: Install tox
community.general.pipx:
name: tox
@@ -167,6 +175,12 @@ EXAMPLES = """
name: tox
state: upgrade
+- name: Install or upgrade tox with dependency group 'docs'
+ community.general.pipx:
+ name: tox
+ source: tox[docs]
+ state: latest
+
- name: Reinstall black with specific Python version
community.general.pipx:
name: black
@@ -181,20 +195,29 @@ EXAMPLES = """
- name: Install multiple packages from list
vars:
pipx_packages:
- - pycowsay
- - black
- - tox
+ - pycowsay
+ - black
+ - tox
community.general.pipx:
name: "{{ item }}"
state: latest
with_items: "{{ pipx_packages }}"
"""
+RETURN = r"""
+version:
+ description: Version of pipx.
+ type: str
+ returned: always
+ sample: "1.7.1"
+ version_added: 10.1.0
+"""
-import json
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
-from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict
+from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.facts.compat import ansible_facts
@@ -248,33 +271,15 @@ class PipX(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def _retrieve_installed(self):
- def process_list(rc, out, err):
- if not out:
- return {}
+ output_process = make_process_dict(include_injected=True)
+ installed, dummy = self.runner('_list global', output_process=output_process).run()
- results = {}
- raw_data = json.loads(out)
- for venv_name, venv in raw_data['venvs'].items():
- results[venv_name] = {
- 'version': venv['metadata']['main_package']['package_version'],
- 'injected': {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()},
- }
- return results
+ if self.app_name is None:
+ return installed
- installed = self.runner('_list', output_process=process_list).run(_list=1)
-
- if self.vars.name is not None:
- name = _make_name(self.vars.name, self.vars.suffix)
- app_list = installed.get(name)
- if app_list:
- return {name: app_list}
- else:
- return {}
-
- return installed
+ return {k: v for k, v in installed.items() if k == self.app_name}
def __init_module__(self):
if self.vars.executable:
@@ -284,8 +289,20 @@ class PipX(StateModuleHelper):
self.command = [facts['python']['executable'], '-m', 'pipx']
self.runner = pipx_runner(self.module, self.command)
+ pkg_req = PackageRequirement(self.module, self.vars.name)
+ self.parsed_name = pkg_req.parsed_name
+ self.parsed_req = pkg_req.requirement
+ self.app_name = _make_name(self.parsed_name, self.vars.suffix)
+
self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip()
+
+ if LooseVersion(self.vars.version) < LooseVersion("1.7.0"):
+ self.do_raise("The pipx tool must be at least at version 1.7.0")
+
def __quit_module__(self):
self.vars.application = self._retrieve_installed()
@@ -296,12 +313,27 @@ class PipX(StateModuleHelper):
self.vars.set('run_info', ctx.run_info, verbosity=4)
def state_install(self):
- if not self.vars.application or self.vars.force:
- self.changed = True
- args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source'
- with self.runner(args_order, check_mode_skip=True) as ctx:
- ctx.run(name_source=[self.vars.name, self.vars.source])
- self._capture_results(ctx)
+ # If we have a version spec and no source, use the version spec as source
+ if self.parsed_req and not self.vars.source:
+ self.vars.source = self.vars.name
+
+ if self.vars.application.get(self.app_name):
+ is_installed = True
+ version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True
+ force = self.vars.force or (not version_match)
+ else:
+ is_installed = False
+ version_match = False
+ force = self.vars.force
+
+ if is_installed and version_match and not force:
+ return
+
+ self.changed = True
+ args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source'
+ with self.runner(args_order, check_mode_skip=True) as ctx:
+ ctx.run(name_source=[self.parsed_name, self.vars.source], force=force)
+ self._capture_results(ctx)
state_present = state_install
@@ -383,12 +415,12 @@ class PipX(StateModuleHelper):
def state_latest(self):
if not self.vars.application or self.vars.force:
self.changed = True
- args_order = 'state index_url install_deps force python system_site_packages editable pip_args suffix name_source'
+ args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source'
with self.runner(args_order, check_mode_skip=True) as ctx:
ctx.run(state='install', name_source=[self.vars.name, self.vars.source])
self._capture_results(ctx)
- with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
+ with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
ctx.run(state='upgrade')
self._capture_results(ctx)
diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py
index 65c0ba552e..fc74df496a 100644
--- a/plugins/modules/pipx_info.py
+++ b/plugins/modules/pipx_info.py
@@ -9,46 +9,44 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pipx_info
short_description: Rretrieves information about applications installed with pipx
version_added: 5.6.0
description:
-- Retrieve details about Python applications installed in isolated virtualenvs using pipx.
+ - Retrieve details about Python applications installed in isolated virtualenvs using pipx.
extends_documentation_fragment:
-- community.general.attributes
-- community.general.attributes.info_module
-- community.general.pipx
+ - community.general.attributes
+ - community.general.attributes.info_module
+ - community.general.pipx
options:
name:
description:
- - Name of an application installed with C(pipx).
+ - Name of an application installed with C(pipx).
type: str
include_deps:
description:
- - Include dependent packages in the output.
+ - Include dependent packages in the output.
type: bool
default: false
include_injected:
description:
- - Include injected packages in the output.
+ - Include injected packages in the output.
type: bool
default: false
include_raw:
description:
- - Returns the raw output of C(pipx list --json).
- - The raw output is not affected by O(include_deps) or O(include_injected).
+ - Returns the raw output of C(pipx list --json).
+ - The raw output is not affected by O(include_deps) or O(include_injected).
type: bool
default: false
global:
version_added: 9.3.0
author:
-- "Alexei Znamensky (@russoz)"
+ - "Alexei Znamensky (@russoz)"
"""
-EXAMPLES = """
----
+EXAMPLES = r"""
- name: retrieve all installed applications
community.general.pipx_info: {}
@@ -68,10 +66,9 @@ EXAMPLES = """
include_deps: true
"""
-RETURN = """
----
+RETURN = r"""
application:
- description: The list of installed applications
+ description: The list of installed applications.
returned: success
type: list
elements: dict
@@ -98,6 +95,15 @@ application:
type: dict
sample:
licenses: "0.6.1"
+ pinned:
+ description:
+ - Whether the installed application is pinned or not.
+ - When using C(pipx<=1.6.0), this returns C(null).
+ returned: success
+ type: bool
+ sample:
+ pinned: true
+ version_added: 10.0.0
raw_output:
description: The raw output of the C(pipx list) command, when O(include_raw=true). Used for debugging.
@@ -109,13 +115,27 @@ cmd:
returned: success
type: list
elements: str
- sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"]
+ sample:
+ [
+ "/usr/bin/python3.10",
+ "-m",
+ "pipx",
+ "list",
+ "--include-injected",
+ "--json"
+ ]
+
+version:
+ description: Version of pipx.
+ type: str
+ returned: always
+ sample: "1.7.1"
+ version_added: 10.1.0
"""
-import json
-
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
-from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.facts.compat import ansible_facts
@@ -133,7 +153,6 @@ class PipXInfo(ModuleHelper):
argument_spec=argument_spec,
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
if self.vars.executable:
@@ -142,42 +161,28 @@ class PipXInfo(ModuleHelper):
facts = ansible_facts(self.module, gather_subset=['python'])
self.command = [facts['python']['executable'], '-m', 'pipx']
self.runner = pipx_runner(self.module, self.command)
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.strip()
- # self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
+ if LooseVersion(self.vars.version) < LooseVersion("1.7.0"):
+ self.do_raise("The pipx tool must be at least at version 1.7.0")
def __run__(self):
- def process_list(rc, out, err):
- if not out:
- return []
-
- results = []
- raw_data = json.loads(out)
+ output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps)
+ with self.runner('_list global', output_process=output_process) as ctx:
+ applications, raw_data = ctx.run()
if self.vars.include_raw:
self.vars.raw_output = raw_data
if self.vars.name:
- if self.vars.name in raw_data['venvs']:
- data = {self.vars.name: raw_data['venvs'][self.vars.name]}
- else:
- data = {}
+ self.vars.application = [
+ v
+ for k, v in applications.items()
+ if k == self.vars.name
+ ]
else:
- data = raw_data['venvs']
-
- for venv_name, venv in data.items():
- entry = {
- 'name': venv_name,
- 'version': venv['metadata']['main_package']['package_version']
- }
- if self.vars.include_injected:
- entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}
- if self.vars.include_deps:
- entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
- results.append(entry)
-
- return results
-
- with self.runner('_list global', output_process=process_list) as ctx:
- self.vars.application = ctx.run(_list=1)
+ self.vars.application = list(applications.values())
self._capture_results(ctx)
def _capture_results(self, ctx):
diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py
index 08fa9272f7..34e86c3774 100644
--- a/plugins/modules/pkg5.py
+++ b/plugins/modules/pkg5.py
@@ -8,11 +8,10 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: pkg5
author:
-- Peter Oliver (@mavit)
+ - Peter Oliver (@mavit)
short_description: Manages packages with the Solaris 11 Image Packaging System
description:
- IPS packages are the native packages in Solaris 11 and higher.
@@ -36,7 +35,7 @@ options:
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
- choices: [ absent, latest, present, installed, removed, uninstalled ]
+ choices: [absent, latest, present, installed, removed, uninstalled]
default: present
type: str
accept_licenses:
@@ -44,7 +43,7 @@ options:
- Accept any licences.
type: bool
default: false
- aliases: [ accept, accept_licences ]
+ aliases: [accept, accept_licences]
be_name:
description:
- Creates a new boot environment with the given name.
@@ -60,8 +59,8 @@ options:
type: bool
default: false
version_added: 9.0.0
-'''
-EXAMPLES = '''
+"""
+EXAMPLES = r"""
- name: Install Vim
community.general.pkg5:
name: editor/vim
@@ -79,9 +78,9 @@ EXAMPLES = '''
- name: Install several packages at once
community.general.pkg5:
name:
- - /file/gnu-findutils
- - /text/gnu-grep
-'''
+ - /file/gnu-findutils
+ - /text/gnu-grep
+"""
import re
diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py
index 6d07e455f4..26abded4e2 100644
--- a/plugins/modules/pkg5_publisher.py
+++ b/plugins/modules/pkg5_publisher.py
@@ -10,15 +10,13 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
description:
- IPS packages are the native packages in Solaris 11 and higher.
- - This modules will configure which publishers a client will download IPS
- packages from.
+ - This module configures which publishers a client downloads IPS packages from.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -31,18 +29,17 @@ options:
description:
- The publisher's name.
required: true
- aliases: [ publisher ]
+ aliases: [publisher]
type: str
state:
description:
- Whether to ensure that a publisher is present or absent.
default: present
- choices: [ present, absent ]
+ choices: [present, absent]
type: str
sticky:
description:
- - Packages installed from a sticky repository can only receive updates
- from that repository.
+ - Packages installed from a sticky repository can only receive updates from that repository.
type: bool
enabled:
description:
@@ -60,8 +57,8 @@ options:
- Multiple values may be provided.
type: list
elements: str
-'''
-EXAMPLES = '''
+"""
+EXAMPLES = r"""
- name: Fetch packages for the solaris publisher direct from Oracle
community.general.pkg5_publisher:
name: solaris
@@ -72,7 +69,7 @@ EXAMPLES = '''
community.general.pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
-'''
+"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py
index 8b29655d37..8695f1b5af 100644
--- a/plugins/modules/pkgin.py
+++ b/plugins/modules/pkgin.py
@@ -16,70 +16,67 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al
description:
- - "The standard package manager for SmartOS, but also usable on NetBSD
- or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+ - 'The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/)).'
author:
- - "Larry Gilbert (@L2G)"
- - "Shaun Zinck (@szinck)"
- - "Jasper Lievisse Adriaanse (@jasperla)"
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- - "Known bug with pkgin < 0.8.0: if a package is removed and another
- package depends on it, the other package will be silently removed as
- well."
+ - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package is silently
+ removed as well.'
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name of package to install/remove;
- - multiple names may be given, separated by commas
- aliases: [pkg]
- type: list
- elements: str
- state:
- description:
- - Intended state of the package
- choices: [ 'present', 'absent' ]
- default: present
- type: str
- update_cache:
- description:
- - Update repository database. Can be run with other steps or on it's own.
- type: bool
- default: false
- upgrade:
- description:
- - Upgrade main packages to their newer versions
- type: bool
- default: false
- full_upgrade:
- description:
- - Upgrade all packages to their newer versions
- type: bool
- default: false
- clean:
- description:
- - Clean packages cache
- type: bool
- default: false
- force:
- description:
- - Force package reinstall
- type: bool
- default: false
-'''
+ name:
+ description:
+ - Name of package to install/remove.
+ - Multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - Intended state of the package.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: false
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions.
+ type: bool
+ default: false
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions.
+ type: bool
+ default: false
+ clean:
+ description:
+ - Clean packages cache.
+ type: bool
+ default: false
+ force:
+ description:
+ - Force package reinstall.
+ type: bool
+ default: false
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install package foo
community.general.pkgin:
name: foo
@@ -125,7 +122,7 @@ EXAMPLES = '''
- name: Clean packages cache (equivalent to pkgin clean)
community.general.pkgin:
clean: true
-'''
+"""
import re
diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py
index 7a04ee3a6e..58eafb9e0c 100644
--- a/plugins/modules/pkgng.py
+++ b/plugins/modules/pkgng.py
@@ -14,107 +14,101 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
+ - Manage binary packages for FreeBSD using C(pkgng) which is available in versions after 9.0.
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
options:
- name:
- description:
- - Name or list of names of packages to install/remove.
- - "With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops."
- required: true
- aliases: [pkg]
- type: list
- elements: str
- state:
- description:
- - State of the package.
- choices: [ 'present', 'latest', 'absent' ]
- required: false
- default: present
- type: str
- cached:
- description:
- - Use local package base instead of fetching an updated one.
- type: bool
- required: false
- default: false
- annotation:
- description:
- - A list of keyvalue-pairs of the form
- C(<+/-/:>[=]). A V(+) denotes adding an annotation, a
- V(-) denotes removing an annotation, and V(:) denotes modifying an
- annotation.
- If setting or modifying annotations, a value must be provided.
- required: false
- type: list
- elements: str
- pkgsite:
- description:
- - For pkgng versions before 1.1.4, specify packagesite to use
- for downloading packages. If not specified, use settings from
- C(/usr/local/etc/pkg.conf).
- - For newer pkgng versions, specify a the name of a repository
- configured in C(/usr/local/etc/pkg/repos).
- required: false
- type: str
- rootdir:
- description:
- - For pkgng versions 1.5 and later, pkg will install all packages
- within the specified root directory.
- - Can not be used together with O(chroot) or O(jail) options.
- required: false
- type: path
- chroot:
- description:
- - Pkg will chroot in the specified environment.
- - Can not be used together with O(rootdir) or O(jail) options.
- required: false
- type: path
- jail:
- description:
- - Pkg will execute in the given jail name or id.
- - Can not be used together with O(chroot) or O(rootdir) options.
- type: str
- autoremove:
- description:
- - Remove automatically installed packages which are no longer needed.
- required: false
- type: bool
- default: false
- ignore_osver:
- description:
- - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
- - Defines the E(IGNORE_OSVERSION) environment variable.
- required: false
- type: bool
- default: false
- version_added: 1.3.0
- use_globs:
- description:
- - Treat the package names as shell glob patterns.
- required: false
- type: bool
- default: true
- version_added: 9.3.0
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - With O(name=*), O(state=latest) operates, but O(state=present) and O(state=absent) are noops.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ choices: ['present', 'latest', 'absent']
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: false
+ annotation:
+ description:
+      - A list of key/value pairs of the form C(<+/-/:>[=]). A V(+) denotes adding an annotation, a V(-) denotes
+ removing an annotation, and V(:) denotes modifying an annotation. If setting or modifying annotations, a value must
+ be provided.
+ required: false
+ type: list
+ elements: str
+ pkgsite:
+ description:
+ - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages. If not specified, use
+ settings from C(/usr/local/etc/pkg.conf).
+      - For newer C(pkgng) versions, specify the name of a repository configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For C(pkgng) versions 1.5 and later, pkg installs all packages within the specified root directory.
+ - Can not be used together with O(chroot) or O(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg chroots in the specified environment.
+ - Can not be used together with O(rootdir) or O(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg executes in the given jail name or ID.
+ - Can not be used together with O(chroot) or O(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: false
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on C(-STABLE) and C(-CURRENT) branches.
+ - Defines the E(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: false
+ version_added: 1.3.0
+ use_globs:
+ description:
+ - Treat the package names as shell glob patterns.
+ required: false
+ type: bool
+ default: true
+ version_added: 9.3.0
author: "bleader (@bleader)"
notes:
- - When using pkgsite, be careful that already in cache packages won't be downloaded again.
- - When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the O(name) option.
-'''
+  - When using pkgsite, be careful that packages already in the cache are not downloaded again.
+ - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly
+ to the O(name) option.
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install package foo
community.general.pkgng:
name: foo
@@ -149,7 +143,7 @@ EXAMPLES = '''
name: foo/bar
state: latest
use_globs: false
-'''
+"""
from collections import defaultdict
@@ -428,17 +422,17 @@ def autoremove_packages(module, run_pkgng):
def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ state=dict(default="present", choices=["present", "latest", "absent"]),
name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
cached=dict(default=False, type='bool'),
- ignore_osver=dict(default=False, required=False, type='bool'),
- annotation=dict(required=False, type='list', elements='str'),
- pkgsite=dict(required=False),
- rootdir=dict(required=False, type='path'),
- chroot=dict(required=False, type='path'),
- jail=dict(required=False, type='str'),
+ ignore_osver=dict(default=False, type='bool'),
+ annotation=dict(type='list', elements='str'),
+ pkgsite=dict(),
+ rootdir=dict(type='path'),
+ chroot=dict(type='path'),
+ jail=dict(type='str'),
autoremove=dict(default=False, type='bool'),
- use_globs=dict(default=True, required=False, type='bool'),
+ use_globs=dict(default=True, type='bool'),
),
supports_check_mode=True,
mutually_exclusive=[["rootdir", "chroot", "jail"]])
diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py
index 15f98a9d49..a40bff06ec 100644
--- a/plugins/modules/pkgutil.py
+++ b/plugins/modules/pkgutil.py
@@ -12,63 +12,63 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: pkgutil
short_description: OpenCSW package management on Solaris
description:
-- This module installs, updates and removes packages from the OpenCSW project for Solaris.
-- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
-- See U(https://www.opencsw.org/) for more information about the project.
+ - This module installs, updates and removes packages from the OpenCSW project for Solaris.
+ - Unlike the M(community.general.svr4pkg) module, it resolves and downloads dependencies.
+ - See U(https://www.opencsw.org/) for more information about the project.
author:
-- Alexander Winkler (@dermute)
-- David Ponessa (@scathatheworm)
+ - Alexander Winkler (@dermute)
+ - David Ponessa (@scathatheworm)
extends_documentation_fragment:
-- community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: full
details:
- - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+ - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even
+ in check mode.
diff_mode:
support: none
options:
name:
description:
- - The name of the package.
- - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil.
+ - The name of the package.
+ - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil.
type: list
required: true
elements: str
- aliases: [ pkg ]
+ aliases: [pkg]
site:
description:
- - The repository path to install the package from.
- - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
required: false
type: str
state:
description:
- - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages.
- - The upgrade (V(latest)) operation will update/install the packages to the latest version available.
+ - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages.
+ - The upgrade (V(latest)) operation updates/installs the packages to the latest version available.
type: str
required: true
- choices: [ absent, installed, latest, present, removed ]
+ choices: [absent, installed, latest, present, removed]
update_catalog:
description:
- - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to V(true).
+ - If you always want to refresh your catalog from the mirror, even when it is not stale, set this to V(true).
type: bool
default: false
force:
description:
- - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true).
- - This is useful for rolling back to stable from testing, or similar operations.
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true).
+ - This is useful for rolling back to stable from testing, or similar operations.
type: bool
default: false
version_added: 1.2.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Install a package
community.general.pkgutil:
name: CSWcommon
@@ -88,8 +88,8 @@ EXAMPLES = r'''
- name: Install several packages
community.general.pkgutil:
name:
- - CSWsudo
- - CSWtop
+ - CSWsudo
+ - CSWtop
state: present
- name: Update all packages
@@ -102,9 +102,9 @@ EXAMPLES = r'''
name: '*'
state: latest
force: true
-'''
+"""
-RETURN = r''' # '''
+RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py
index 4d10c448e1..1555ec842e 100644
--- a/plugins/modules/pmem.py
+++ b/plugins/modules/pmem.py
@@ -7,21 +7,20 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
author:
- - Masayoshi Mizuma (@mizumm)
+ - Masayoshi Mizuma (@mizumm)
module: pmem
short_description: Configure Intel Optane Persistent Memory modules
version_added: 4.5.0
description:
- - This module allows Configuring Intel Optane Persistent Memory modules
- (PMem) using ipmctl and ndctl command line tools.
+  - This module allows configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line
+ tools.
requirements:
- - ipmctl and ndctl command line tools
- - xmltodict
+ - C(ipmctl) and C(ndctl) command line tools
+ - xmltodict
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
check_mode:
support: none
@@ -30,33 +29,32 @@ attributes:
options:
appdirect:
description:
- - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)).
- - Create AppDirect capacity utilizing hardware interleaving across the
- requested PMem modules if applicable given the specified target.
- - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100)
+ - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)).
+ - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the
+ specified target.
+ - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100).
type: int
appdirect_interleaved:
description:
- - Create AppDirect capacity that is interleaved any other PMem modules.
+      - Create AppDirect capacity that is interleaved with any other PMem modules.
type: bool
required: false
default: true
memorymode:
description:
- - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)).
+ - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)).
type: int
reserved:
description:
- - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped
- into the system physical address space and will be presented as reserved
- capacity with Show Device and Show Memory Resources Commands.
- - O(reserved) will be set automatically if this is not configured.
+ - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) is not mapped into the system physical address space
+ and is presented as reserved capacity with Show Device and Show Memory Resources Commands.
+ - O(reserved) is set automatically if this is not configured.
type: int
required: false
socket:
description:
- - This enables to set the configuration for each socket by using the socket ID.
- - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket.
+      - This enables setting the configuration for each socket by using the socket ID.
+ - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket.
type: list
elements: dict
suboptions:
@@ -66,18 +64,18 @@ options:
required: true
appdirect:
description:
- - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID.
+ - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID.
type: int
required: true
appdirect_interleaved:
description:
- - Create AppDirect capacity that is interleaved any other PMem modules within the socket ID.
+          - Create AppDirect capacity that is interleaved with any other PMem modules within the socket ID.
type: bool
required: false
default: true
memorymode:
description:
- - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID.
+ - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID.
type: int
required: true
reserved:
@@ -86,86 +84,87 @@ options:
type: int
namespace:
description:
- - This enables to set the configuration for the namespace of the PMem.
+      - This enables setting the configuration for the namespace of the PMem.
type: list
elements: dict
suboptions:
mode:
description:
- - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace.
+ - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace.
type: str
required: true
choices: ['raw', 'sector', 'fsdax', 'devdax']
type:
description:
- - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace.
+ - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace.
type: str
required: false
choices: ['pmem', 'blk']
size:
description:
- - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB,
- V(m) or V(M) or V(MB) for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB.
+ - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB)
+ for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB.
- This option is required if multiple namespaces are configured.
- If this option is not set, all of the available space of a region is configured.
type: str
required: false
namespace_append:
description:
- - Enable to append the new namespaces to the system.
- - The default is V(false) so the all existing namespaces not listed in O(namespace) are removed.
+ - Enable to append the new namespaces to the system.
+      - The default is V(false) so all existing namespaces not listed in O(namespace) are removed.
type: bool
default: false
required: false
-'''
+"""
-RETURN = r'''
+RETURN = r"""
reboot_required:
- description: Indicates that the system reboot is required to complete the PMem configuration.
- returned: success
- type: bool
- sample: true
+ description: Indicates that the system reboot is required to complete the PMem configuration.
+ returned: success
+ type: bool
+ sample: true
result:
- description:
- - Shows the value of AppDirect, Memory Mode and Reserved size in bytes.
- - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID.
- - If O(namespace) argument is provided, shows the detail of each namespace.
- returned: success
- type: list
- elements: dict
- contains:
- appdirect:
- description: AppDirect size in bytes.
- type: int
- memorymode:
- description: Memory Mode size in bytes.
- type: int
- reserved:
- description: Reserved size in bytes.
- type: int
- socket:
- description: The socket ID to be configured.
- type: int
- namespace:
- description: The list of the detail of namespace.
- type: list
- sample: [
- {
- "appdirect": 111669149696,
- "memorymode": 970662608896,
- "reserved": 3626500096,
- "socket": 0
- },
- {
- "appdirect": 111669149696,
- "memorymode": 970662608896,
- "reserved": 3626500096,
- "socket": 1
- }
- ]
-'''
+ description:
+ - Shows the value of AppDirect, Memory Mode and Reserved size in bytes.
+ - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID.
+ - If O(namespace) argument is provided, shows the detail of each namespace.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ appdirect:
+ description: AppDirect size in bytes.
+ type: int
+ memorymode:
+ description: Memory Mode size in bytes.
+ type: int
+ reserved:
+ description: Reserved size in bytes.
+ type: int
+ socket:
+ description: The socket ID to be configured.
+ type: int
+ namespace:
+ description: The list of the detail of namespace.
+ type: list
+ sample:
+ [
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 0
+ },
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 1
+ }
+ ]
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Configure the Pmem as AppDirect 10, Memory Mode 70, and the Reserved 20 percent.
community.general.pmem:
appdirect: 10
@@ -205,7 +204,7 @@ EXAMPLES = r'''
- size: 320MB
type: pmem
mode: sector
-'''
+"""
import json
import re
@@ -538,7 +537,7 @@ class PersistentMemory(object):
out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal']
for entry in out:
- # Probably it's a bug of ipmctl to show the socket goal
+ # Probably it is a bug of ipmctl to show the socket goal
# which isn't specified by the -socket option.
# Anyway, filter the noise out here:
if skt and skt['id'] != int(entry['SocketID'], 16):
diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py
index 315b07ba8e..2dad63a608 100644
--- a/plugins/modules/pnpm.py
+++ b/plugins/modules/pnpm.py
@@ -12,13 +12,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pnpm
-short_description: Manage node.js packages with pnpm
+short_description: Manage Node.js packages with C(pnpm)
version_added: 7.4.0
description:
- - Manage node.js packages with the L(pnpm package manager, https://pnpm.io/).
+ - Manage Node.js packages with the L(pnpm package manager, https://pnpm.io/).
author:
- "Aritra Sen (@aretrosen)"
- "Chris Hoffman (@chrishoffman), creator of NPM Ansible module"
@@ -32,18 +31,18 @@ attributes:
options:
name:
description:
- - The name of a node.js library to install.
- - All packages in package.json are installed if not provided.
+ - The name of a Node.js library to install.
+ - All packages in C(package.json) are installed if not provided.
type: str
required: false
alias:
description:
- - Alias of the node.js library.
+ - Alias of the Node.js library.
type: str
required: false
path:
description:
- - The base path to install the node.js libraries.
+ - The base path to install the Node.js libraries.
type: path
required: false
version:
@@ -53,7 +52,7 @@ options:
required: false
global:
description:
- - Install the node.js library globally.
+ - Install the Node.js library globally.
required: false
default: false
type: bool
@@ -78,14 +77,14 @@ options:
production:
description:
- Install dependencies in production mode.
- - Pnpm will ignore any dependencies under C(devDependencies) in package.json.
+      - Pnpm ignores any dependencies under C(devDependencies) in C(package.json).
required: false
type: bool
default: false
dev:
description:
- Install dependencies in development mode.
- - Pnpm will ignore any regular dependencies in C(package.json).
+ - Pnpm ignores any regular dependencies in C(package.json).
required: false
default: false
type: bool
@@ -97,7 +96,7 @@ options:
type: bool
state:
description:
- - Installation state of the named node.js library.
+ - Installation state of the named Node.js library.
- If V(absent) is selected, a name option must be provided.
type: str
required: false
@@ -107,36 +106,36 @@ requirements:
- Pnpm executable present in E(PATH).
"""
-EXAMPLES = """
-- name: Install "tailwindcss" node.js package.
+EXAMPLES = r"""
+- name: Install "tailwindcss" Node.js package.
community.general.pnpm:
name: tailwindcss
path: /app/location
-- name: Install "tailwindcss" node.js package on version 3.3.2
+- name: Install "tailwindcss" Node.js package on version 3.3.2
community.general.pnpm:
name: tailwindcss
version: 3.3.2
path: /app/location
-- name: Install "tailwindcss" node.js package globally.
+- name: Install "tailwindcss" Node.js package globally.
community.general.pnpm:
name: tailwindcss
global: true
-- name: Install "tailwindcss" node.js package as dev dependency.
+- name: Install "tailwindcss" Node.js package as dev dependency.
community.general.pnpm:
name: tailwindcss
path: /app/location
dev: true
-- name: Install "tailwindcss" node.js package as optional dependency.
+- name: Install "tailwindcss" Node.js package as optional dependency.
community.general.pnpm:
name: tailwindcss
path: /app/location
optional: true
-- name: Install "tailwindcss" node.js package version 0.1.3 as tailwind-1
+- name: Install "tailwindcss" Node.js package version 0.1.3 as tailwind-1
community.general.pnpm:
name: tailwindcss
alias: tailwind-1
@@ -158,6 +157,7 @@ EXAMPLES = """
path: /app/location
state: latest
"""
+
import json
import os
@@ -339,11 +339,11 @@ class Pnpm(object):
def main():
arg_spec = dict(
- name=dict(default=None),
- alias=dict(default=None),
- path=dict(default=None, type="path"),
- version=dict(default=None),
- executable=dict(default=None, type="path"),
+ name=dict(),
+ alias=dict(),
+ path=dict(type="path"),
+ version=dict(),
+ executable=dict(type="path"),
ignore_scripts=dict(default=False, type="bool"),
no_optional=dict(default=False, type="bool"),
production=dict(default=False, type="bool"),
diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py
index 8ae8efb087..8a00b934dd 100644
--- a/plugins/modules/portage.py
+++ b/plugins/modules/portage.py
@@ -14,13 +14,11 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: portage
short_description: Package manager for Gentoo
description:
- - Manages Gentoo packages
-
+ - Manages Gentoo packages.
extends_documentation_fragment:
- community.general.attributes
@@ -33,21 +31,21 @@ attributes:
options:
package:
description:
- - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world)
+ - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world).
aliases: [name]
type: list
elements: str
state:
description:
- - State of the package atom
+ - State of the package atom.
default: "present"
- choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ choices: ["present", "installed", "emerged", "absent", "removed", "unmerged", "latest"]
type: str
update:
description:
- - Update packages to the best version available (--update)
+ - Update packages to the best version available (C(--update)).
type: bool
default: false
@@ -59,82 +57,81 @@ options:
deep:
description:
- - Consider the entire dependency tree of packages (--deep)
+ - Consider the entire dependency tree of packages (C(--deep)).
type: bool
default: false
newuse:
description:
- - Include installed packages where USE flags have changed (--newuse)
+ - Include installed packages where USE flags have changed (C(--newuse)).
type: bool
default: false
changed_use:
description:
- - Include installed packages where USE flags have changed, except when
- - flags that the user has not enabled are added or removed
- - (--changed-use)
+      - Include installed packages where USE flags have changed, except when
+        flags that the user has not enabled are added or removed
+        (C(--changed-use)).
type: bool
default: false
oneshot:
description:
- - Do not add the packages to the world file (--oneshot)
+ - Do not add the packages to the world file (C(--oneshot)).
type: bool
default: false
noreplace:
description:
- - Do not re-emerge installed packages (--noreplace)
+ - Do not re-emerge installed packages (C(--noreplace)).
type: bool
default: true
nodeps:
description:
- - Only merge packages but not their dependencies (--nodeps)
+ - Only merge packages but not their dependencies (C(--nodeps)).
type: bool
default: false
onlydeps:
description:
- - Only merge packages' dependencies but not the packages (--onlydeps)
+ - Only merge packages' dependencies but not the packages (C(--onlydeps)).
type: bool
default: false
depclean:
description:
- - Remove packages not needed by explicitly merged packages (--depclean)
- - If no package is specified, clean up the world's dependencies
- - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ - Remove packages not needed by explicitly merged packages (C(--depclean)).
+ - If no package is specified, clean up the world's dependencies.
+ - Otherwise, C(--depclean) serves as a dependency aware version of C(--unmerge).
type: bool
default: false
quiet:
description:
- - Run emerge in quiet mode (--quiet)
+ - Run emerge in quiet mode (C(--quiet)).
type: bool
default: false
verbose:
description:
- - Run emerge in verbose mode (--verbose)
+ - Run emerge in verbose mode (C(--verbose)).
type: bool
default: false
select:
description:
- If set to V(true), explicitely add the package to the world file.
- - Please note that this option is not used for idempotency, it is only used
- when actually installing a package.
+ - Please note that this option is not used for idempotency, it is only used when actually installing a package.
type: bool
version_added: 8.6.0
sync:
description:
- - Sync package repositories first
- - If V(yes), perform "emerge --sync"
- - If V(web), perform "emerge-webrsync"
- choices: [ "web", "yes", "no" ]
+ - Sync package repositories first.
+ - If V(yes), perform C(emerge --sync).
+ - If V(web), perform C(emerge-webrsync).
+ choices: ["web", "yes", "no"]
type: str
getbinpkgonly:
@@ -171,16 +168,14 @@ options:
jobs:
description:
- Specifies the number of packages to build simultaneously.
- - "Since version 2.6: Value of 0 or False resets any previously added"
- - --jobs setting values
+ - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--jobs) setting values.'
type: int
loadavg:
description:
- - Specifies that no new builds should be started if there are
- - other builds running and the load average is at least LOAD
- - "Since version 2.6: Value of 0 or False resets any previously added"
- - --load-average setting values
+ - Specifies that no new builds should be started if there are other builds running and the load average is at least
+ LOAD.
+      - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--load-average) setting values.'
type: float
withbdeps:
@@ -191,26 +186,24 @@ options:
quietbuild:
description:
- - Redirect all build output to logs alone, and do not display it
- - on stdout (--quiet-build)
+ - Redirect all build output to logs alone, and do not display it on stdout (C(--quiet-build)).
type: bool
default: false
quietfail:
description:
- - Suppresses display of the build log on stdout (--quiet-fail)
- - Only the die message and the path of the build log will be
- - displayed on stdout.
+      - Suppresses display of the build log on stdout (C(--quiet-fail)).
+ - Only the die message and the path of the build log are displayed on stdout.
type: bool
default: false
author:
- - "William L Thomson Jr (@wltjr)"
- - "Yap Sok Ann (@sayap)"
- - "Andrew Udvare (@Tatsh)"
-'''
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Make sure package foo is installed
community.general.portage:
package: foo
@@ -252,7 +245,7 @@ EXAMPLES = '''
package: foo
state: absent
depclean: true
-'''
+"""
import os
import re
@@ -517,13 +510,13 @@ portage_absent_states = ['absent', 'unmerged', 'removed']
def main():
module = AnsibleModule(
argument_spec=dict(
- package=dict(type='list', elements='str', default=None, aliases=['name']),
+ package=dict(type='list', elements='str', aliases=['name']),
state=dict(
default=portage_present_states[0],
choices=portage_present_states + portage_absent_states,
),
update=dict(default=False, type='bool'),
- backtrack=dict(default=None, type='int'),
+ backtrack=dict(type='int'),
deep=dict(default=False, type='bool'),
newuse=dict(default=False, type='bool'),
changed_use=dict(default=False, type='bool'),
@@ -532,18 +525,18 @@ def main():
nodeps=dict(default=False, type='bool'),
onlydeps=dict(default=False, type='bool'),
depclean=dict(default=False, type='bool'),
- select=dict(default=None, type='bool'),
+ select=dict(type='bool'),
quiet=dict(default=False, type='bool'),
verbose=dict(default=False, type='bool'),
- sync=dict(default=None, choices=['yes', 'web', 'no']),
+ sync=dict(choices=['yes', 'web', 'no']),
getbinpkgonly=dict(default=False, type='bool'),
getbinpkg=dict(default=False, type='bool'),
usepkgonly=dict(default=False, type='bool'),
usepkg=dict(default=False, type='bool'),
keepgoing=dict(default=False, type='bool'),
- jobs=dict(default=None, type='int'),
- loadavg=dict(default=None, type='float'),
- withbdeps=dict(default=None, type='bool'),
+ jobs=dict(type='int'),
+ loadavg=dict(type='float'),
+ withbdeps=dict(type='bool'),
quietbuild=dict(default=False, type='bool'),
quietfail=dict(default=False, type='bool'),
),
diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py
index 59dafb1eb8..d4e1591d32 100644
--- a/plugins/modules/portinstall.py
+++ b/plugins/modules/portinstall.py
@@ -12,43 +12,42 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: portinstall
short_description: Installing packages from FreeBSD's ports system
description:
- - Manage packages for FreeBSD using 'portinstall'.
+ - Manage packages for FreeBSD using C(portinstall).
extends_documentation_fragment:
- - community.general.attributes
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- description:
- - name of package to install/remove
- aliases: [pkg]
- required: true
- type: str
- state:
- description:
- - state of the package
- choices: [ 'present', 'absent' ]
- required: false
- default: present
- type: str
- use_packages:
- description:
- - use packages instead of ports whenever available
- type: bool
- required: false
- default: true
+ name:
+ description:
+ - Name of package to install/remove.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - State of the package.
+ choices: ['present', 'absent']
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+ - Use packages instead of ports whenever available.
+ type: bool
+ required: false
+ default: true
author: "berenddeboer (@berenddeboer)"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Install package foo
community.general.portinstall:
name: foo
@@ -63,7 +62,7 @@ EXAMPLES = '''
community.general.portinstall:
name: foo,bar
state: absent
-'''
+"""
import re
diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py
index 4945a8fc20..f87813031b 100644
--- a/plugins/modules/pritunl_org.py
+++ b/plugins/modules/pritunl_org.py
@@ -8,54 +8,48 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pritunl_org
author: Florian Dambrine (@Lowess)
version_added: 2.5.0
short_description: Manages Pritunl Organizations using the Pritunl API
description:
- - A module to manage Pritunl organizations using the Pritunl API.
+ - A module to manage Pritunl organizations using the Pritunl API.
extends_documentation_fragment:
- - community.general.pritunl
- - community.general.attributes
+ - community.general.pritunl
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- name:
- type: str
- required: true
- aliases:
- - org
- description:
- - The name of the organization to manage in Pritunl.
-
- force:
- type: bool
- default: false
- description:
- - If O(force) is V(true) and O(state) is V(absent), the module
- will delete the organization, no matter if it contains users
- or not. By default O(force) is V(false), which will cause the
- module to fail the deletion of the organization when it contains
- users.
-
- state:
- type: str
- default: 'present'
- choices:
- - present
- - absent
- description:
- - If V(present), the module adds organization O(name) to
- Pritunl. If V(absent), attempt to delete the organization
- from Pritunl (please read about O(force) usage).
+ name:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization to manage in Pritunl.
+ force:
+ type: bool
+ default: false
+ description:
+ - If O(force) is V(true) and O(state) is V(absent), the module deletes the organization, no matter if it contains users
+ or not. By default O(force) is V(false), which causes the module to fail the deletion of the organization when it
+ contains users.
+ state:
+ type: str
+ default: 'present'
+ choices:
+ - present
+ - absent
+ description:
+ - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from
+ Pritunl (please read about O(force) usage).
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Ensure the organization named MyOrg exists
community.general.pritunl_org:
state: present
@@ -67,20 +61,20 @@ EXAMPLES = """
name: MyOrg
"""
-RETURN = """
+RETURN = r"""
response:
- description: JSON representation of a Pritunl Organization.
- returned: success
- type: dict
- sample:
- {
- "auth_api": false,
- "name": "Foo",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "csftwlu6uhralzi2dpmhekz3",
- }
+ description: JSON representation of a Pritunl Organization.
+ returned: success
+ type: dict
+ sample:
+ {
+ "auth_api": false,
+ "name": "Foo",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3"
+ }
"""
@@ -182,10 +176,8 @@ def main():
argument_spec.update(
dict(
name=dict(required=True, type="str", aliases=["org"]),
- force=dict(required=False, type="bool", default=False),
- state=dict(
- required=False, choices=["present", "absent"], default="present"
- ),
+ force=dict(type="bool", default=False),
+ state=dict(choices=["present", "absent"], default="present"),
)
)
diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py
index 979e29b5a0..952acd8963 100644
--- a/plugins/modules/pritunl_org_info.py
+++ b/plugins/modules/pritunl_org_info.py
@@ -8,32 +8,29 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pritunl_org_info
author: Florian Dambrine (@Lowess)
version_added: 2.5.0
short_description: List Pritunl Organizations using the Pritunl API
description:
- - A module to list Pritunl organizations using the Pritunl API.
+ - A module to list Pritunl organizations using the Pritunl API.
extends_documentation_fragment:
- - community.general.pritunl
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.pritunl
+ - community.general.attributes
+ - community.general.attributes.info_module
options:
- organization:
- type: str
- required: false
- aliases:
- - org
- default: null
- description:
- - Name of the Pritunl organization to search for.
- If none provided, the module will return all Pritunl
- organizations.
+ organization:
+ type: str
+ required: false
+ aliases:
+ - org
+ default: null
+ description:
+ - Name of the Pritunl organization to search for. If none provided, the module returns all Pritunl organizations.
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: List all existing Pritunl organizations
community.general.pritunl_org_info:
@@ -42,39 +39,39 @@ EXAMPLES = """
organization: MyOrg
"""
-RETURN = """
+RETURN = r"""
organizations:
- description: List of Pritunl organizations.
- returned: success
- type: list
- elements: dict
- sample:
- [
- {
- "auth_api": false,
- "name": "FooOrg",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "csftwlu6uhralzi2dpmhekz3",
- },
- {
- "auth_api": false,
- "name": "MyOrg",
- "auth_token": null,
- "user_count": 3,
- "auth_secret": null,
- "id": "58070daee63f3b2e6e472c36",
- },
- {
- "auth_api": false,
- "name": "BarOrg",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "v1sncsxxybnsylc8gpqg85pg",
- }
- ]
+ description: List of Pritunl organizations.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "auth_api": false,
+ "name": "FooOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3"
+ },
+ {
+ "auth_api": false,
+ "name": "MyOrg",
+ "auth_token": null,
+ "user_count": 3,
+ "auth_secret": null,
+ "id": "58070daee63f3b2e6e472c36"
+ },
+ {
+ "auth_api": false,
+ "name": "BarOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "v1sncsxxybnsylc8gpqg85pg"
+ }
+ ]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -116,7 +113,7 @@ def main():
argument_spec.update(
dict(
- organization=dict(required=False, type="str", default=None, aliases=["org"])
+ organization=dict(type="str", aliases=["org"])
)
)
diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py
index bdbc335d90..45de07eba6 100644
--- a/plugins/modules/pritunl_user.py
+++ b/plugins/modules/pritunl_user.py
@@ -8,97 +8,87 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pritunl_user
author: "Florian Dambrine (@Lowess)"
version_added: 2.3.0
short_description: Manage Pritunl Users using the Pritunl API
description:
- - A module to manage Pritunl users using the Pritunl API.
+ - A module to manage Pritunl users using the Pritunl API.
extends_documentation_fragment:
- - community.general.pritunl
- - community.general.attributes
+ - community.general.pritunl
+ - community.general.attributes
attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
options:
- organization:
- type: str
- required: true
- aliases:
- - org
- description:
- - The name of the organization the user is part of.
-
- state:
- type: str
- default: 'present'
- choices:
- - present
- - absent
- description:
- - If V(present), the module adds user O(user_name) to
- the Pritunl O(organization). If V(absent), removes the user
- O(user_name) from the Pritunl O(organization).
-
- user_name:
- type: str
- required: true
- default: null
- description:
- - Name of the user to create or delete from Pritunl.
-
- user_email:
- type: str
- required: false
- default: null
- description:
- - Email address associated with the user O(user_name).
-
- user_type:
- type: str
- required: false
- default: client
- choices:
- - client
- - server
- description:
- - Type of the user O(user_name).
-
- user_groups:
- type: list
- elements: str
- required: false
- default: null
- description:
- - List of groups associated with the user O(user_name).
-
- user_disabled:
- type: bool
- required: false
- default: null
- description:
- - Enable/Disable the user O(user_name).
-
- user_gravatar:
- type: bool
- required: false
- default: null
- description:
- - Enable/Disable Gravatar usage for the user O(user_name).
-
- user_mac_addresses:
- type: list
- elements: str
- description:
- - Allowed MAC addresses for the user O(user_name).
- version_added: 5.0.0
+ organization:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization the user is part of.
+ state:
+ type: str
+ default: 'present'
+ choices:
+ - present
+ - absent
+ description:
+ - If V(present), the module adds user O(user_name) to the Pritunl O(organization). If V(absent), removes the user O(user_name)
+ from the Pritunl O(organization).
+ user_name:
+ type: str
+ required: true
+ default:
+ description:
+ - Name of the user to create or delete from Pritunl.
+ user_email:
+ type: str
+ required: false
+ default:
+ description:
+ - Email address associated with the user O(user_name).
+ user_type:
+ type: str
+ required: false
+ default: client
+ choices:
+ - client
+ - server
+ description:
+ - Type of the user O(user_name).
+ user_groups:
+ type: list
+ elements: str
+ required: false
+ default:
+ description:
+ - List of groups associated with the user O(user_name).
+ user_disabled:
+ type: bool
+ required: false
+ default:
+ description:
+ - Enable/Disable the user O(user_name).
+ user_gravatar:
+ type: bool
+ required: false
+ default:
+ description:
+ - Enable/Disable Gravatar usage for the user O(user_name).
+ user_mac_addresses:
+ type: list
+ elements: str
+ description:
+ - Allowed MAC addresses for the user O(user_name).
+ version_added: 5.0.0
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Create the user Foo with email address foo@bar.com in MyOrg
community.general.pritunl_user:
state: present
@@ -123,37 +113,38 @@ EXAMPLES = """
user_name: Foo
"""
-RETURN = """
+RETURN = r"""
response:
- description: JSON representation of Pritunl Users.
- returned: success
- type: dict
- sample:
- {
- "audit": false,
- "auth_type": "google",
- "bypass_secondary": false,
- "client_to_client": false,
- "disabled": false,
- "dns_mapping": null,
- "dns_servers": null,
- "dns_suffix": null,
- "email": "foo@bar.com",
- "gravatar": true,
- "groups": [
- "foo", "bar"
- ],
- "id": "5d070dafe63q3b2e6s472c3b",
- "name": "foo@acme.com",
- "network_links": [],
- "organization": "58070daee6sf342e6e4s2c36",
- "organization_name": "Acme",
- "otp_auth": true,
- "otp_secret": "35H5EJA3XB2$4CWG",
- "pin": false,
- "port_forwarding": [],
- "servers": [],
- }
+ description: JSON representation of Pritunl Users.
+ returned: success
+ type: dict
+ sample:
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo",
+ "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": []
+ }
"""
@@ -329,18 +320,14 @@ def main():
argument_spec.update(
dict(
organization=dict(required=True, type="str", aliases=["org"]),
- state=dict(
- required=False, choices=["present", "absent"], default="present"
- ),
+ state=dict(choices=["present", "absent"], default="present"),
user_name=dict(required=True, type="str"),
- user_type=dict(
- required=False, choices=["client", "server"], default="client"
- ),
- user_email=dict(required=False, type="str", default=None),
- user_groups=dict(required=False, type="list", elements="str", default=None),
- user_disabled=dict(required=False, type="bool", default=None),
- user_gravatar=dict(required=False, type="bool", default=None),
- user_mac_addresses=dict(required=False, type="list", elements="str", default=None),
+ user_type=dict(choices=["client", "server"], default="client"),
+ user_email=dict(type="str"),
+ user_groups=dict(type="list", elements="str"),
+ user_disabled=dict(type="bool"),
+ user_gravatar=dict(type="bool"),
+ user_mac_addresses=dict(type="list", elements="str"),
)
)
diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py
index 3f8f62003f..2e8180675a 100644
--- a/plugins/modules/pritunl_user_info.py
+++ b/plugins/modules/pritunl_user_info.py
@@ -8,45 +8,42 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = """
----
+DOCUMENTATION = r"""
module: pritunl_user_info
author: "Florian Dambrine (@Lowess)"
version_added: 2.3.0
short_description: List Pritunl Users using the Pritunl API
description:
- - A module to list Pritunl users using the Pritunl API.
+ - A module to list Pritunl users using the Pritunl API.
extends_documentation_fragment:
- - community.general.pritunl
- - community.general.attributes
- - community.general.attributes.info_module
+ - community.general.pritunl
+ - community.general.attributes
+ - community.general.attributes.info_module
options:
- organization:
- type: str
- required: true
- aliases:
- - org
- description:
- - The name of the organization the user is part of.
-
- user_name:
- type: str
- required: false
- description:
- - Name of the user to filter on Pritunl.
-
- user_type:
- type: str
- required: false
- default: client
- choices:
- - client
- - server
- description:
- - Type of the user O(user_name).
+ organization:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization the user is part of.
+ user_name:
+ type: str
+ required: false
+ description:
+ - Name of the user to filter on Pritunl.
+ user_type:
+ type: str
+ required: false
+ default: client
+ choices:
+ - client
+ - server
+ description:
+ - Type of the user O(user_name).
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: List all existing users part of the organization MyOrg
community.general.pritunl_user_info:
state: list
@@ -59,40 +56,41 @@ EXAMPLES = """
user_name: Florian
"""
-RETURN = """
+RETURN = r"""
users:
- description: List of Pritunl users.
- returned: success
- type: list
- elements: dict
- sample:
- [
- {
- "audit": false,
- "auth_type": "google",
- "bypass_secondary": false,
- "client_to_client": false,
- "disabled": false,
- "dns_mapping": null,
- "dns_servers": null,
- "dns_suffix": null,
- "email": "foo@bar.com",
- "gravatar": true,
- "groups": [
- "foo", "bar"
- ],
- "id": "5d070dafe63q3b2e6s472c3b",
- "name": "foo@acme.com",
- "network_links": [],
- "organization": "58070daee6sf342e6e4s2c36",
- "organization_name": "Acme",
- "otp_auth": true,
- "otp_secret": "35H5EJA3XB2$4CWG",
- "pin": false,
- "port_forwarding": [],
- "servers": [],
- }
- ]
+ description: List of Pritunl users.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo",
+ "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": []
+ }
+ ]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -153,12 +151,8 @@ def main():
argument_spec.update(
dict(
organization=dict(required=True, type="str", aliases=["org"]),
- user_name=dict(required=False, type="str", default=None),
- user_type=dict(
- required=False,
- choices=["client", "server"],
- default="client",
- ),
+ user_name=dict(type="str"),
+ user_type=dict(choices=["client", "server"], default="client"),
)
)
diff --git a/plugins/modules/profitbricks.py b/plugins/modules/profitbricks.py
deleted file mode 100644
index 875bd78c4e..0000000000
--- a/plugins/modules/profitbricks.py
+++ /dev/null
@@ -1,666 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks
-short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine
-description:
- - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait
- for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created virtual machines.
- type: bool
- default: true
- name:
- description:
- - The name of the virtual machine.
- type: str
- image:
- description:
- - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- default: []
- datacenter:
- description:
- - The datacenter to provision this virtual machine.
- type: str
- cores:
- description:
- - The number of CPU cores to allocate to the virtual machine.
- default: 2
- type: int
- ram:
- description:
- - The amount of memory to allocate to the virtual machine.
- default: 2048
- type: int
- cpu_family:
- description:
- - The CPU family type to allocate to the virtual machine.
- type: str
- default: AMD_OPTERON
- choices: [ "AMD_OPTERON", "INTEL_XEON" ]
- volume_size:
- description:
- - The size in GB of the boot volume.
- type: int
- default: 10
- bus:
- description:
- - The bus type for the volume.
- type: str
- default: VIRTIO
- choices: [ "IDE", "VIRTIO"]
- instance_ids:
- description:
- - list of instance ids, currently only used when state='absent' to remove instances.
- type: list
- elements: str
- default: []
- count:
- description:
- - The number of virtual machines to create.
- type: int
- default: 1
- location:
- description:
- - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
- type: str
- default: us/las
- choices: [ "us/las", "de/fra", "de/fkb" ]
- assign_public_ip:
- description:
- - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
- type: bool
- default: false
- lan:
- description:
- - The ID of the LAN you wish to add the servers to.
- type: int
- default: 1
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: true
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- remove_boot_volume:
- description:
- - remove the bootVolume of the virtual machine you're destroying.
- type: bool
- default: true
- state:
- description:
- - create or terminate instances
- - 'The choices available are: V(running), V(stopped), V(absent), V(present).'
- type: str
- default: 'present'
- disk_type:
- description:
- - the type of disk to be allocated.
- type: str
- choices: [SSD, HDD]
- default: HDD
-
-requirements:
- - "profitbricks"
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Provisioning example
-- name: Create three servers and enumerate their names
- community.general.profitbricks:
- datacenter: Tardis One
- name: web%02d.stackpointcloud.com
- cores: 4
- ram: 2048
- volume_size: 50
- cpu_family: INTEL_XEON
- image: a3eae284-a2fe-11e4-b187-5f1f641608c8
- location: us/las
- count: 3
- assign_public_ip: true
-
-- name: Remove virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: absent
-
-- name: Start virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: running
-
-- name: Stop virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: stopped
-'''
-
-import re
-import uuid
-import time
-import traceback
-
-HAS_PB_SDK = True
-
-try:
- from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _create_machine(module, profitbricks, datacenter, name):
- cores = module.params.get('cores')
- ram = module.params.get('ram')
- cpu_family = module.params.get('cpu_family')
- volume_size = module.params.get('volume_size')
- disk_type = module.params.get('disk_type')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- bus = module.params.get('bus')
- lan = module.params.get('lan')
- assign_public_ip = module.params.get('assign_public_ip')
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
- location = module.params.get('location')
- image = module.params.get('image')
- assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- if assign_public_ip:
- public_found = False
-
- lans = profitbricks.list_lans(datacenter)
- for lan in lans['items']:
- if lan['properties']['public']:
- public_found = True
- lan = lan['id']
-
- if not public_found:
- i = LAN(
- name='public',
- public=True)
-
- lan_response = profitbricks.create_lan(datacenter, i)
- _wait_for_completion(profitbricks, lan_response,
- wait_timeout, "_create_machine")
- lan = lan_response['id']
-
- v = Volume(
- name=str(uuid.uuid4()).replace('-', '')[:10],
- size=volume_size,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- bus=bus)
-
- n = NIC(
- lan=int(lan)
- )
-
- s = Server(
- name=name,
- ram=ram,
- cores=cores,
- cpu_family=cpu_family,
- create_volumes=[v],
- nics=[n],
- )
-
- try:
- create_server_response = profitbricks.create_server(
- datacenter_id=datacenter, server=s)
-
- _wait_for_completion(profitbricks, create_server_response,
- wait_timeout, "create_virtual_machine")
-
- server_response = profitbricks.get_server(
- datacenter_id=datacenter,
- server_id=create_server_response['id'],
- depth=3
- )
- except Exception as e:
- module.fail_json(msg="failed to create the new server: %s" % str(e))
- else:
- return server_response
-
-
-def _startstop_machine(module, profitbricks, datacenter_id, server_id):
- state = module.params.get('state')
-
- try:
- if state == 'running':
- profitbricks.start_server(datacenter_id, server_id)
- else:
- profitbricks.stop_server(datacenter_id, server_id)
-
- return True
- except Exception as e:
- module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
-
-
-def _create_datacenter(module, profitbricks):
- datacenter = module.params.get('datacenter')
- location = module.params.get('location')
- wait_timeout = module.params.get('wait_timeout')
-
- i = Datacenter(
- name=datacenter,
- location=location
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- return datacenter_response
- except Exception as e:
- module.fail_json(msg="failed to create the new server(s): %s" % str(e))
-
-
-def create_virtual_machine(module, profitbricks):
- """
- Create new virtual machine
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object
-
- Returns:
- True if a new virtual machine was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
- lan = module.params.get('lan')
- wait_timeout = module.params.get('wait_timeout')
- failed = True
- datacenter_found = False
-
- virtual_machines = []
- virtual_machine_ids = []
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if datacenter_id:
- datacenter_found = True
-
- if not datacenter_found:
- datacenter_response = _create_datacenter(module, profitbricks)
- datacenter_id = datacenter_response['id']
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "create_virtual_machine")
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name]
-
- # Prefetch a list of servers for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for name in names:
- # Skip server creation if the server already exists.
- if _get_server_id(server_list, name):
- continue
-
- create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
- nics = profitbricks.list_nics(datacenter_id, create_response['id'])
- for n in nics['items']:
- if lan == n['properties']['lan']:
- create_response.update({'public_ip': n['properties']['ips'][0]})
-
- virtual_machines.append(create_response)
-
- failed = False
-
- results = {
- 'failed': failed,
- 'machines': virtual_machines,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in virtual_machines],
- }
- }
-
- return results
-
-
-def remove_virtual_machine(module, profitbricks):
- """
- Removes a virtual machine.
-
- This will remove the virtual machine along with the bootVolume.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Not yet supported: handle deletion of attached data disks.
-
- Returns:
- True if a new virtual server was deleted, false otherwise
- """
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
- remove_boot_volume = module.params.get('remove_boot_volume')
- changed = False
-
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID for server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- # Remove the server's boot volume
- if remove_boot_volume:
- _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
-
- # Remove the server
- try:
- server_response = profitbricks.delete_server(datacenter_id, server_id)
- except Exception as e:
- module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
- else:
- changed = True
-
- return changed
-
-
-def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
- """
- Remove the boot volume from the server
- """
- try:
- server = profitbricks.get_server(datacenter_id, server_id)
- volume_id = server['properties']['bootVolume']['id']
- volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
- except Exception as e:
- module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
-
-
-def startstop_machine(module, profitbricks, state):
- """
- Starts or Stops a virtual machine.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Returns:
- True when the servers process the action successfully, false otherwise.
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- changed = False
-
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID of server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- _startstop_machine(module, profitbricks, datacenter_id, server_id)
- changed = True
-
- if wait:
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- matched_instances = []
- for res in profitbricks.list_servers(datacenter_id)['items']:
- if state == 'running':
- if res['properties']['vmState'].lower() == state:
- matched_instances.append(res)
- elif state == 'stopped':
- if res['properties']['vmState'].lower() == 'shutoff':
- matched_instances.append(res)
-
- if len(matched_instances) < len(instance_ids):
- time.sleep(5)
- else:
- break
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
-
- return (changed)
-
-
-def _get_datacenter_id(datacenters, identity):
- """
- Fetch and return datacenter UUID by datacenter name if found.
- """
- for datacenter in datacenters['items']:
- if identity in (datacenter['properties']['name'], datacenter['id']):
- return datacenter['id']
- return None
-
-
-def _get_server_id(servers, identity):
- """
- Fetch and return server UUID by server name if found.
- """
- for server in servers['items']:
- if identity in (server['properties']['name'], server['id']):
- return server['id']
- return None
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- name=dict(),
- image=dict(),
- cores=dict(type='int', default=2),
- ram=dict(type='int', default=2048),
- cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
- default='AMD_OPTERON'),
- volume_size=dict(type='int', default=10),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- image_password=dict(no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- lan=dict(type='int', default=1),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- location=dict(choices=LOCATIONS, default='us/las'),
- assign_public_ip=dict(type='bool', default=False),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- remove_boot_volume=dict(type='bool', default=True),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required ' +
- 'for running or stopping machines.')
-
- try:
- (changed) = remove_virtual_machine(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state in ('running', 'stopped'):
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for ' +
- 'running or stopping machines.')
- try:
- (changed) = startstop_machine(module, profitbricks, state)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
- if not module.params.get('image'):
- module.fail_json(msg='image parameter is required for new instance')
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is ' +
- 'required for new instance')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is ' +
- 'required for new instance')
-
- try:
- (machine_dict_array) = create_virtual_machine(module, profitbricks)
- module.exit_json(**machine_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_datacenter.py b/plugins/modules/profitbricks_datacenter.py
deleted file mode 100644
index 4aa1fa5eeb..0000000000
--- a/plugins/modules/profitbricks_datacenter.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_datacenter
-short_description: Create or destroy a ProfitBricks Virtual Datacenter
-description:
- - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
- on profitbricks >= 1.0.0
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the virtual datacenter.
- type: str
- description:
- description:
- - The description of the virtual datacenter.
- type: str
- required: false
- location:
- description:
- - The datacenter location.
- type: str
- required: false
- default: us/las
- choices: [ "us/las", "de/fra", "de/fkb" ]
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the datacenter to be created before returning
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Create or terminate datacenters.
- - "The available choices are: V(present), V(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create a datacenter
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
-
-- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Datacenter
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _remove_datacenter(module, profitbricks, datacenter):
- try:
- profitbricks.delete_datacenter(datacenter)
- except Exception as e:
- module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
-
-
-def create_datacenter(module, profitbricks):
- """
- Creates a Datacenter
-
- This will create a new Datacenter in the specified location.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if a new datacenter was created, false otherwise
- """
- name = module.params.get('name')
- location = module.params.get('location')
- description = module.params.get('description')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- i = Datacenter(
- name=name,
- location=location,
- description=description
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- if wait:
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- results = {
- 'datacenter_id': datacenter_response['id']
- }
-
- return results
-
- except Exception as e:
- module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
-
-
-def remove_datacenter(module, profitbricks):
- """
- Removes a Datacenter.
-
- This will remove a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the datacenter was deleted, false otherwise
- """
- name = module.params.get('name')
- changed = False
-
- if uuid_match.match(name):
- _remove_datacenter(module, profitbricks, name)
- changed = True
- else:
- datacenters = profitbricks.list_datacenters()
-
- for d in datacenters['items']:
- vdc = profitbricks.get_datacenter(d['id'])
-
- if name == vdc['properties']['name']:
- name = d['id']
- _remove_datacenter(module, profitbricks, name)
- changed = True
-
- return changed
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(),
- description=dict(),
- location=dict(choices=LOCATIONS, default='us/las'),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=600, type='int'),
- state=dict(default='present'), # @TODO add choices
- )
- )
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required deleting a virtual datacenter.')
-
- try:
- (changed) = remove_datacenter(module, profitbricks)
- module.exit_json(
- changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for a new datacenter')
- if not module.params.get('location'):
- module.fail_json(msg='location parameter is required for a new datacenter')
-
- try:
- (datacenter_dict_array) = create_datacenter(module, profitbricks)
- module.exit_json(**datacenter_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_nic.py b/plugins/modules/profitbricks_nic.py
deleted file mode 100644
index 9498be15dc..0000000000
--- a/plugins/modules/profitbricks_nic.py
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_nic
-short_description: Create or Remove a NIC
-description:
- - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- required: true
- server:
- description:
- - The server name or ID.
- type: str
- required: true
- name:
- description:
- - The name or ID of the NIC. This is only required on deletes, but not on create.
- - If not specified, it defaults to a value based on UUID4.
- type: str
- lan:
- description:
- - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: true
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: true
- wait:
- description:
- - wait for the operation to complete before returning
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource
- - "The available choices are: V(present), V(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- lan: 2
- wait_timeout: 500
- state: present
-
-- name: Remove a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- name: 7341c2454f
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import uuid
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, NIC
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _make_default_name():
- return str(uuid.uuid4()).replace('-', '')[:10]
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def create_nic(module, profitbricks):
- """
- Creates a NIC.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the nic creates, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- lan = module.params.get('lan')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
- try:
- n = NIC(
- name=name,
- lan=lan
- )
-
- nic_response = profitbricks.create_nic(datacenter, server, n)
-
- if wait:
- _wait_for_completion(profitbricks, nic_response,
- wait_timeout, "create_nic")
-
- return nic_response
-
- except Exception as e:
- module.fail_json(msg="failed to create the NIC: %s" % str(e))
-
-
-def delete_nic(module, profitbricks):
- """
- Removes a NIC
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the NIC was removed, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- server_found = False
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server_found = True
- server = s['id']
- break
-
- if not server_found:
- return False
-
- # Locate UUID for NIC
- nic_found = False
- if not (uuid_match.match(name)):
- nic_list = profitbricks.list_nics(datacenter, server)
- for n in nic_list['items']:
- if name == n['properties']['name']:
- nic_found = True
- name = n['id']
- break
-
- if not nic_found:
- return False
-
- try:
- nic_response = profitbricks.delete_nic(datacenter, server, name)
- return nic_response
- except Exception as e:
- module.fail_json(msg="failed to remove the NIC: %s" % str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(required=True),
- server=dict(required=True),
- name=dict(),
- lan=dict(),
- subscription_user=dict(required=True),
- subscription_password=dict(required=True, no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- ),
- required_if=(
- ('state', 'absent', ['name']),
- ('state', 'present', ['lan']),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- (changed) = delete_nic(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
- elif state == 'present':
- try:
- (nic_dict) = create_nic(module, profitbricks)
- module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_volume.py b/plugins/modules/profitbricks_volume.py
deleted file mode 100644
index f623da7128..0000000000
--- a/plugins/modules/profitbricks_volume.py
+++ /dev/null
@@ -1,440 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume
-short_description: Create or destroy a volume
-description:
- - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to create the volumes.
- type: str
- name:
- description:
- - The name of the volumes. You can enumerate the names using auto_increment.
- type: str
- size:
- description:
- - The size of the volume.
- type: int
- required: false
- default: 10
- bus:
- description:
- - The bus type.
- type: str
- required: false
- default: VIRTIO
- choices: [ "IDE", "VIRTIO"]
- image:
- description:
- - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- required: false
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- default: []
- disk_type:
- description:
- - The disk type of the volume.
- type: str
- required: false
- default: HDD
- choices: [ "HDD", "SSD" ]
- licence_type:
- description:
- - The licence type for the volume. This is used when the image is non-standard.
- - "The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER)."
- type: str
- required: false
- default: UNKNOWN
- count:
- description:
- - The number of volumes you wish to create.
- type: int
- required: false
- default: 1
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created virtual machines.
- default: true
- type: bool
- instance_ids:
- description:
- - list of instance ids, currently only used when state='absent' to remove instances.
- type: list
- elements: str
- default: []
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the datacenter to be created before returning
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - create or terminate datacenters
- - "The available choices are: V(present), V(absent)."
- type: str
- required: false
- default: 'present'
- server:
- description:
- - Server name to attach the volume to.
- type: str
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create multiple volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- name: vol%02d
- count: 5
- auto_increment: true
- wait_timeout: 500
- state: present
-
-- name: Remove Volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- instance_ids:
- - 'vol01'
- - 'vol02'
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-import traceback
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Volume
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _create_volume(module, profitbricks, datacenter, name):
- size = module.params.get('size')
- bus = module.params.get('bus')
- image = module.params.get('image')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- disk_type = module.params.get('disk_type')
- licence_type = module.params.get('licence_type')
- wait_timeout = module.params.get('wait_timeout')
- wait = module.params.get('wait')
-
- try:
- v = Volume(
- name=name,
- size=size,
- bus=bus,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- licence_type=licence_type
- )
-
- volume_response = profitbricks.create_volume(datacenter, v)
-
- if wait:
- _wait_for_completion(profitbricks, volume_response,
- wait_timeout, "_create_volume")
-
- except Exception as e:
- module.fail_json(msg="failed to create the volume: %s" % str(e))
-
- return volume_response
-
-
-def _delete_volume(module, profitbricks, datacenter, volume):
- try:
- profitbricks.delete_volume(datacenter, volume)
- except Exception as e:
- module.fail_json(msg="failed to remove the volume: %s" % str(e))
-
-
-def create_volume(module, profitbricks):
- """
- Creates a volume.
-
- This will create a volume in a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
-
- datacenter_found = False
- failed = True
- volumes = []
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- datacenter_found = True
- break
-
- if not datacenter_found:
- module.fail_json(msg='datacenter could not be found.')
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name] * count
-
- for name in names:
- create_response = _create_volume(module, profitbricks, str(datacenter), name)
- volumes.append(create_response)
- _attach_volume(module, profitbricks, datacenter, create_response['id'])
- failed = False
-
- results = {
- 'failed': failed,
- 'volumes': volumes,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in volumes],
- }
- }
-
- return results
-
-
-def delete_volume(module, profitbricks):
- """
- Removes a volume.
-
- This will create a volume in a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was removed, false otherwise
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- datacenter = module.params.get('datacenter')
- changed = False
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- for n in instance_ids:
- if uuid_match.match(n):
- _delete_volume(module, profitbricks, datacenter, n)
- changed = True
- else:
- volumes = profitbricks.list_volumes(datacenter)
- for v in volumes['items']:
- if n == v['properties']['name']:
- volume_id = v['id']
- _delete_volume(module, profitbricks, datacenter, volume_id)
- changed = True
-
- return changed
-
-
-def _attach_volume(module, profitbricks, datacenter, volume):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- server = module.params.get('server')
-
- # Locate UUID for Server
- if server:
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- try:
- return profitbricks.attach_volume(datacenter, server, volume)
- except Exception as e:
- module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- name=dict(),
- size=dict(type='int', default=10),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- image=dict(),
- image_password=dict(no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- licence_type=dict(default='UNKNOWN'),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for running or stopping machines.')
-
- try:
- (changed) = delete_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for new instance')
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
-
- try:
- (volume_dict_array) = create_volume(module, profitbricks)
- module.exit_json(**volume_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_volume_attachments.py b/plugins/modules/profitbricks_volume_attachments.py
deleted file mode 100644
index 76459515ee..0000000000
--- a/plugins/modules/profitbricks_volume_attachments.py
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume_attachments
-short_description: Attach or detach a volume
-description:
- - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- server:
- description:
- - The name of the server you wish to detach or attach the volume.
- type: str
- volume:
- description:
- - The volume name or ID.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the operation to complete before returning
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource
- - "The available choices are: V(present), V(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Attach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: present
-
-- name: Detach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def attach_volume(module, profitbricks):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.attach_volume(datacenter, server, volume)
-
-
-def detach_volume(module, profitbricks):
- """
- Detaches a volume.
-
- This will remove a volume from the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was detached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.detach_volume(datacenter, server, volume)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- volume=dict(),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required')
- if not module.params.get('server'):
- module.fail_json(msg='server parameter is required')
- if not module.params.get('volume'):
- module.fail_json(msg='volume parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- (changed) = detach_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
- elif state == 'present':
- try:
- attach_volume(module, profitbricks)
- module.exit_json()
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
deleted file mode 100644
index 52d5a849f3..0000000000
--- a/plugins/modules/proxmox.py
+++ /dev/null
@@ -1,1324 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: proxmox
-short_description: Management of instances in Proxmox VE cluster
-description:
- - Allows you to create/delete/stop instances in Proxmox VE cluster.
- - The module automatically detects containerization type (lxc for PVE 4, openvz for older).
- - Since community.general 4.0.0 on, there are no more default values.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- password:
- description:
- - the instance root password
- type: str
- hostname:
- description:
- - the instance hostname
- - required only for O(state=present)
- - must be unique if vmid is not passed
- type: str
- ostemplate:
- description:
- - the template for VM creating
- - required only for O(state=present)
- type: str
- disk:
- description:
- - This option was previously described as "hard disk size in GB for instance" however several formats describing
- a lxc mount are permitted.
- - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically
- choose which storage to allocate from, however new versions enforce the C(:) syntax.
- - "Additional options are available by using some combination of the following key-value pairs as a
- comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>]
- [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])."
- - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
- - This option is mutually exclusive with O(storage) and O(disk_volume).
- type: str
- disk_volume:
- description:
- - Specify a hash/dictionary of the C(rootfs) disk.
- - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description.
- - This option is mutually exclusive with O(storage) and O(disk).
- type: dict
- version_added: 9.2.0
- suboptions:
- storage:
- description:
- - O(disk_volume.storage) is the storage identifier of the storage to use for the C(rootfs).
- - Mutually exclusive with O(disk_volume.host_path).
- type: str
- volume:
- description:
- - O(disk_volume.volume) is the name of an existing volume.
- - If not defined, the module will check if one exists. If not, a new volume will be created.
- - If defined, the volume must exist under that name.
- - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path).
- type: str
- size:
- description:
- - O(disk_volume.size) is the size of the storage to use.
- - The size is given in GB.
- - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path).
- type: int
- host_path:
- description:
- - O(disk_volume.host_path) defines a bind or device path on the PVE host to use for the C(rootfs).
- - Mutually exclusive with O(disk_volume.storage), O(disk_volume.volume), and O(disk_volume.size).
- type: path
- options:
- description:
- - O(disk_volume.options) is a dict of extra options.
- - The value of any given option must be a string, for example V("1").
- type: dict
- cores:
- description:
- - Specify number of cores per socket.
- type: int
- cpus:
- description:
- - numbers of allocated cpus for instance
- type: int
- memory:
- description:
- - memory size in MB for instance
- type: int
- swap:
- description:
- - swap memory size in MB for instance
- type: int
- netif:
- description:
- - specifies network interfaces for the container. As a hash/dictionary defining interfaces.
- type: dict
- features:
- description:
- - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options).
- - Some features require the use of a privileged container.
- type: list
- elements: str
- version_added: 2.0.0
- startup:
- description:
- - Specifies the startup order of the container.
- - Use C(order=#) where C(#) is a non-negative number to define the general startup order. Shutdown in done with reverse ordering.
- - Use C(up=#) where C(#) is in seconds, to specify a delay to wait before the next VM is started.
- - Use C(down=#) where C(#) is in seconds, to specify a delay to wait before the next VM is stopped.
- type: list
- elements: str
- version_added: 8.5.0
- mounts:
- description:
- - Specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points as strings.
- - This Option is mutually exclusive with O(mount_volumes).
- type: dict
- mount_volumes:
- description:
- - Specify additional mounts (separate disks) for the container. As a hash/dictionary defining mount points.
- - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description.
- - This Option is mutually exclusive with O(mounts).
- type: list
- elements: dict
- version_added: 9.2.0
- suboptions:
- id:
- description:
- - O(mount_volumes[].id) is the identifier of the mount point written as C(mp[n]).
- type: str
- required: true
- storage:
- description:
- - O(mount_volumes[].storage) is the storage identifier of the storage to use.
- - Mutually exclusive with O(mount_volumes[].host_path).
- type: str
- volume:
- description:
- - O(mount_volumes[].volume) is the name of an existing volume.
- - If not defined, the module will check if one exists. If not, a new volume will be created.
- - If defined, the volume must exist under that name.
- - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path).
- type: str
- size:
- description:
- - O(mount_volumes[].size) is the size of the storage to use.
- - The size is given in GB.
- - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path).
- type: int
- host_path:
- description:
- - O(mount_volumes[].host_path) defines a bind or device path on the PVE host to use for the C(rootfs).
- - Mutually exclusive with O(mount_volumes[].storage), O(mount_volumes[].volume), and O(mount_volumes[].size).
- type: path
- mountpoint:
- description:
- - O(mount_volumes[].mountpoint) is the mount point of the volume.
- type: path
- required: true
- options:
- description:
- - O(mount_volumes[].options) is a dict of extra options.
- - The value of any given option must be a string, for example V("1").
- type: dict
- ip_address:
- description:
- - specifies the address the container will be assigned
- type: str
- onboot:
- description:
- - specifies whether a VM will be started during system bootup
- type: bool
- storage:
- description:
- - Target storage.
- - This Option is mutually exclusive with O(disk) and O(disk_volume).
- type: str
- default: 'local'
- ostype:
- description:
- - Specifies the C(ostype) of the LXC container.
- - If set to V(auto), no C(ostype) will be provided on instance creation.
- choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']
- type: str
- default: 'auto'
- version_added: 8.1.0
- cpuunits:
- description:
- - CPU weight for a VM
- type: int
- nameserver:
- description:
- - sets DNS server IP address for a container
- type: str
- searchdomain:
- description:
- - sets DNS search domain for a container
- type: str
- tags:
- description:
- - List of tags to apply to the container.
- - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
- - Tags are only available in Proxmox 7+.
- type: list
- elements: str
- version_added: 6.2.0
- timeout:
- description:
- - timeout for operations
- type: int
- default: 30
- update:
- description:
- - If V(true), the container will be updated with new values.
- type: bool
- default: false
- version_added: 8.1.0
- force:
- description:
- - Forcing operations.
- - Can be used only with states V(present), V(stopped), V(restarted).
- - with O(state=present) force option allow to overwrite existing container.
- - with states V(stopped), V(restarted) allow to force stop instance.
- type: bool
- default: false
- purge:
- description:
- - Remove container from all related configurations.
- - For example backup jobs, replication jobs, or HA.
- - Related ACLs and Firewall entries will always be removed.
- - Used with O(state=absent).
- type: bool
- default: false
- version_added: 2.3.0
- state:
- description:
- - Indicate desired state of the instance
- - V(template) was added in community.general 8.1.0.
- type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'template']
- default: present
- pubkey:
- description:
- - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions
- type: str
- unprivileged:
- description:
- - Indicate if the container should be unprivileged.
- - The default change to V(true) in community.general 7.0.0. It used to be V(false) before.
- type: bool
- default: true
- description:
- description:
- - Specify the description for the container. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file.
- type: str
- version_added: '0.2.0'
- hookscript:
- description:
- - Script that will be executed during various steps in the containers lifetime.
- type: str
- version_added: '0.2.0'
- timezone:
- description:
- - Timezone used by the container, accepts values like V(Europe/Paris).
- - The special value V(host) configures the same timezone used by Proxmox host.
- type: str
- version_added: '7.1.0'
- clone:
- description:
- - ID of the container to be cloned.
- - O(description), O(hostname), and O(pool) will be copied from the cloned container if not specified.
- - The type of clone created is defined by the O(clone_type) parameter.
- - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
- type: int
- version_added: 4.3.0
- clone_type:
- description:
- - Type of the clone created.
- - V(full) creates a full clone, and O(storage) must be specified.
- - V(linked) creates a linked clone, and the cloned container must be a template container.
- - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
- O(storage) may be specified, if not it will fall back to the default.
- type: str
- choices: ['full', 'linked', 'opportunistic']
- default: opportunistic
- version_added: 4.3.0
-author: Sergei Antipov (@UnderGreen)
-seealso:
- - module: community.general.proxmox_vm_info
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.proxmox.selection
- - community.general.attributes
-'''
-
-EXAMPLES = r'''
-- name: Create new container with minimal options
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options specifying disk storage location and size
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- disk: 'local-lvm:20'
-
-- name: Create new container with minimal options specifying disk storage location and size via disk_volume
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- disk_volume:
- storage: local
- size: 20
-
-- name: Create new container with hookscript and description
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- hookscript: 'local:snippets/vm_hook.sh'
- description: created with ansible
-
-- name: Create new container automatically selecting the next available vmid.
- community.general.proxmox:
- node: 'uk-mc02'
- api_user: 'root@pam'
- api_password: '1q2w3e'
- api_host: 'node1'
- password: '123456'
- hostname: 'example.org'
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options with force(it will rewrite existing container)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- force: true
-
-- name: Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options defining network interface with dhcp
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"
-
-- name: Create new container with minimal options defining network interface with static ip
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"
-
-- name: Create new container with more options defining network interface with static ip4 and ip6 with vlan-tag and mtu
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,ip6=fe80::1227/64,gw6=fe80::1,bridge=vmbr0,firewall=1,tag=934,mtu=1500"
-
-- name: Create new container with minimal options defining a mount with 8GB
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- mounts:
- mp0: "local:8,mp=/mnt/test/"
-
-- name: Create new container with minimal options defining a mount with 8GB using mount_volumes
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- mount_volumes:
- - id: mp0
- storage: local
- size: 8
- mountpoint: /mnt/test
-
-- name: Create new container with minimal options defining a cpu core limit
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- cores: 2
-
-- name: Create new container with minimal options and same timezone as proxmox host
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- timezone: host
-
-- name: Create a new container with nesting enabled and allows the use of CIFS/NFS inside the container.
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- features:
- - nesting=1
- - mount=cifs,nfs
-
-- name: >
- Create a linked clone of the template container with id 100. The newly created container with be a
- linked clone, because no storage parameter is defined
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
-
-- name: Create a full clone of the container with id 100
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
- storage: local
-
-- name: Update container configuration
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0"
- update: true
-
-- name: Start container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
-
-- name: >
- Start container with mount. You should enter a 90-second timeout because servers
- with additional disks take longer to boot
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
- timeout: 90
-
-- name: Stop container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: stopped
-
-- name: Stop container with force
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- force: true
- state: stopped
-
-- name: Restart container(stopped or mounted container you can't restart)
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: restarted
-
-- name: Convert container to template
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: template
-
-- name: Convert container to template (stop container if running)
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: template
- force: true
-
-- name: Remove container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: absent
-'''
-
-import re
-import time
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
-
-VZ_TYPE = None
-
-
-class ProxmoxLxcAnsible(ProxmoxAnsible):
- def content_check(self, node, ostemplate, template_store):
- return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
-
- def is_template_container(self, node, vmid):
- """Check if the specified container is a template."""
- proxmox_node = self.proxmox_api.nodes(node)
- config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
- return config.get('template', False)
-
- def update_config(self, vmid, node, disk, cpus, memory, swap, **kwargs):
- if VZ_TYPE != "lxc":
- self.module.fail_json(
- changed=False,
- msg="Updating configuration is only supported for LXC enabled proxmox clusters.",
- )
-
- def parse_disk_string(disk_string):
- # Example strings:
- # "acl=0,thin1:base-100-disk-1,size=8G"
- # "thin1:10,backup=0"
- # "local:20"
- # "volume=local-lvm:base-100-disk-1,size=20G"
- # "/mnt/bindmounts/shared,mp=/shared"
- # "volume=/dev/USB01,mp=/mnt/usb01"
- args = disk_string.split(",")
- # If the volume is not explicitly defined but implicit by only passing a key,
- # add the "volume=" key prefix for ease of parsing.
- args = ["volume=" + arg if "=" not in arg else arg for arg in args]
- # Then create a dictionary from the arguments
- disk_kwargs = dict(map(lambda item: item.split("="), args))
-
- VOLUME_PATTERN = r"""(?x)
- (?:(?P[\w\-.]+):
- (?:(?P\d+)|
- (?P[^,\s]+))
- )|
- (?P[^,\s]+)
- """
- # DISCLAIMER:
- # There are two things called a "volume":
- # 1. The "volume" key which describes the storage volume, device or directory to mount into the container.
- # 2. The storage volume of a storage-backed mount point in the PVE storage sub system.
- # In this section, we parse the "volume" key and check which type of mount point we are dealing with.
- pattern = re.compile(VOLUME_PATTERN)
- match_dict = pattern.match(disk_kwargs.pop("volume")).groupdict()
- match_dict = {k: v for k, v in match_dict.items() if v is not None}
-
- if "storage" in match_dict and "volume" in match_dict:
- disk_kwargs["storage"] = match_dict["storage"]
- disk_kwargs["volume"] = match_dict["volume"]
- elif "storage" in match_dict and "size" in match_dict:
- disk_kwargs["storage"] = match_dict["storage"]
- disk_kwargs["size"] = match_dict["size"]
- elif "host_path" in match_dict:
- disk_kwargs["host_path"] = match_dict["host_path"]
-
- # Pattern matching only available in Python 3.10+
- # match match_dict:
- # case {"storage": storage, "volume": volume}:
- # disk_kwargs["storage"] = storage
- # disk_kwargs["volume"] = volume
-
- # case {"storage": storage, "size": size}:
- # disk_kwargs["storage"] = storage
- # disk_kwargs["size"] = size
-
- # case {"host_path": host_path}:
- # disk_kwargs["host_path"] = host_path
-
- return disk_kwargs
-
- def convert_mounts(mount_dict):
- return_list = []
- for mount_key, mount_value in mount_dict.items():
- mount_config = parse_disk_string(mount_value)
- return_list.append(dict(id=mount_key, **mount_config))
-
- return return_list
-
- def build_volume(
- key,
- storage=None,
- volume=None,
- host_path=None,
- size=None,
- mountpoint=None,
- options=None,
- **kwargs
- ):
- if size is not None and isinstance(size, str):
- size = size.strip("G")
- # 1. Handle volume checks/creation
- # 1.1 Check if defined volume exists
- if volume is not None:
- storage_content = self.get_storage_content(node, storage, vmid=vmid)
- vol_ids = [vol["volid"] for vol in storage_content]
- volid = "{storage}:{volume}".format(storage=storage, volume=volume)
- if volid not in vol_ids:
- self.module.fail_json(
- changed=False,
- msg="Storage {storage} does not contain volume {volume}".format(
- storage=storage,
- volume=volume,
- ),
- )
- vol_string = "{storage}:{volume},size={size}G".format(
- storage=storage, volume=volume, size=size
- )
- # 1.2 If volume not defined (but storage is), check if it exists
- elif storage is not None:
- api_node = self.proxmox_api.nodes(
- node
- ) # The node must exist, but not the LXC
- try:
- vol = api_node.lxc(vmid).get("config").get(key)
- volume = parse_disk_string(vol).get("volume")
- vol_string = "{storage}:{volume},size={size}G".format(
- storage=storage, volume=volume, size=size
- )
-
- # If not, we have proxmox create one using the special syntax
- except Exception:
- vol_string = "{storage}:{size}".format(storage=storage, size=size)
- else:
- raise AssertionError('Internal error')
-
- # 1.3 If we have a host_path, we don't have storage, a volume, or a size
- vol_string = ",".join(
- [vol_string] +
- ([] if host_path is None else [host_path]) +
- ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) +
- ([] if options is None else ["{0}={1}".format(k, v) for k, v in options.items()]) +
- ([] if not kwargs else ["{0}={1}".format(k, v) for k, v in kwargs.items()])
- )
-
- return {key: vol_string}
-
- # Version limited features
- minimum_version = {"tags": "6.1", "timezone": "6.3"}
- proxmox_node = self.proxmox_api.nodes(node)
-
- pve_version = self.version()
-
- # Fail on unsupported features
- for option, version in minimum_version.items():
- if pve_version < LooseVersion(version) and option in kwargs:
- self.module.fail_json(
- changed=False,
- msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".format(
- option=option, version=version, pve_version=pve_version
- ),
- )
-
- # Remove all empty kwarg entries
- kwargs = {key: val for key, val in kwargs.items() if val is not None}
-
- if cpus is not None:
- kwargs["cpulimit"] = cpus
- if disk is not None:
- kwargs["disk_volume"] = parse_disk_string(disk)
- if "disk_volume" in kwargs:
- disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume"))
- kwargs.update(disk_dict)
- if memory is not None:
- kwargs["memory"] = memory
- if swap is not None:
- kwargs["swap"] = swap
- if "netif" in kwargs:
- kwargs.update(kwargs.pop("netif"))
- if "mounts" in kwargs:
- kwargs["mount_volumes"] = convert_mounts(kwargs.pop("mounts"))
- if "mount_volumes" in kwargs:
- mounts_list = kwargs.pop("mount_volumes")
- for mount_config in mounts_list:
- key = mount_config.pop("id")
- mount_dict = build_volume(key=key, **mount_config)
- kwargs.update(mount_dict)
- # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string
- if "tags" in kwargs:
- re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$")
- for tag in kwargs["tags"]:
- if not re_tag.match(tag):
- self.module.fail_json(msg="%s is not a valid tag" % tag)
- kwargs["tags"] = ",".join(kwargs["tags"])
-
- # fetch the current config
- current_config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
-
- # compare the requested config against the current
- update_config = False
- for (arg, value) in kwargs.items():
- # if the arg isn't in the current config, it needs to be updated
- if arg not in current_config:
- update_config = True
- break
- # some values are lists, the order isn't always the same, so split them and compare by key
- if isinstance(value, str):
- current_values = current_config[arg].split(",")
- requested_values = value.split(",")
- for new_value in requested_values:
- if new_value not in current_values:
- update_config = True
- break
- # if it's not a list (or string) just compare the current value
- else:
- # some types don't match with the API, so forcing to string for comparison
- if str(value) != str(current_config[arg]):
- update_config = True
- break
-
- if update_config:
- getattr(proxmox_node, VZ_TYPE)(vmid).config.put(vmid=vmid, node=node, **kwargs)
- else:
- self.module.exit_json(changed=False, msg="Container config is already up to date")
-
- def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
-
- # Version limited features
- minimum_version = {
- 'tags': '6.1',
- 'timezone': '6.3'
- }
- proxmox_node = self.proxmox_api.nodes(node)
-
- # Remove all empty kwarg entries
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- pve_version = self.version()
-
- # Fail on unsupported features
- for option, version in minimum_version.items():
- if pve_version < LooseVersion(version) and option in kwargs:
- self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".
- format(option=option, version=version, pve_version=pve_version))
-
- if VZ_TYPE == 'lxc':
- kwargs['cpulimit'] = cpus
- kwargs['rootfs'] = disk
- if 'netif' in kwargs:
- kwargs.update(kwargs['netif'])
- del kwargs['netif']
- if 'mounts' in kwargs:
- kwargs.update(kwargs['mounts'])
- del kwargs['mounts']
- if 'pubkey' in kwargs:
- if self.version() >= LooseVersion('4.2'):
- kwargs['ssh-public-keys'] = kwargs['pubkey']
- del kwargs['pubkey']
- else:
- kwargs['cpus'] = cpus
- kwargs['disk'] = disk
-
- # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string
- if 'tags' in kwargs:
- re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
- for tag in kwargs['tags']:
- if not re_tag.match(tag):
- self.module.fail_json(msg='%s is not a valid tag' % tag)
- kwargs['tags'] = ",".join(kwargs['tags'])
-
- if kwargs.get('ostype') == 'auto':
- kwargs.pop('ostype')
-
- if clone is not None:
- if VZ_TYPE != 'lxc':
- self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
-
- clone_is_template = self.is_template_container(node, clone)
-
- # By default, create a full copy only when the cloned container is not a template.
- create_full_copy = not clone_is_template
-
- # Only accept parameters that are compatible with the clone endpoint.
- valid_clone_parameters = ['hostname', 'pool', 'description']
- if self.module.params['storage'] is not None and clone_is_template:
- # Cloning a template, so create a full copy instead of a linked copy
- create_full_copy = True
- elif self.module.params['storage'] is None and not clone_is_template:
- # Not cloning a template, but also no defined storage. This isn't possible.
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
-
- if self.module.params['clone_type'] == 'linked':
- if not clone_is_template:
- self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
- # Don't need to do more, by default create_full_copy is set to false already
- elif self.module.params['clone_type'] == 'opportunistic':
- if not clone_is_template:
- # Cloned container is not a template, so we need our 'storage' parameter
- valid_clone_parameters.append('storage')
- elif self.module.params['clone_type'] == 'full':
- create_full_copy = True
- valid_clone_parameters.append('storage')
-
- clone_parameters = {}
-
- if create_full_copy:
- clone_parameters['full'] = '1'
- else:
- clone_parameters['full'] = '0'
- for param in valid_clone_parameters:
- if self.module.params[param] is not None:
- clone_parameters[param] = self.module.params[param]
-
- taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
- else:
- taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
-
- while timeout:
- if self.api_task_ok(node, taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(vmid=vmid, node=node, msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def start_instance(self, vm, vmid, timeout):
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def stop_instance(self, vm, vmid, timeout, force):
- if force:
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
- else:
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def convert_to_template(self, vm, vmid, timeout, force):
- if getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running' and force:
- self.stop_instance(vm, vmid, timeout, force)
- # not sure why, but templating a container doesn't return a taskid
- getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).template.post()
- return True
-
- def umount_instance(self, vm, vmid, timeout):
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- proxmox_args = dict(
- vmid=dict(type='int', required=False),
- node=dict(),
- pool=dict(),
- password=dict(no_log=True),
- hostname=dict(),
- ostemplate=dict(),
- disk=dict(type='str'),
- disk_volume=dict(
- type="dict",
- options=dict(
- storage=dict(type="str"),
- volume=dict(type="str"),
- size=dict(type="int"),
- host_path=dict(type="path"),
- options=dict(type="dict"),
- ),
- required_together=[("storage", "size")],
- required_by={
- "volume": ("storage", "size"),
- },
- mutually_exclusive=[
- ("host_path", "storage"),
- ("host_path", "volume"),
- ("host_path", "size"),
- ],
- ),
- cores=dict(type='int'),
- cpus=dict(type='int'),
- memory=dict(type='int'),
- swap=dict(type='int'),
- netif=dict(type='dict'),
- mounts=dict(type='dict'),
- mount_volumes=dict(
- type="list",
- elements="dict",
- options=dict(
- id=(dict(type="str", required=True)),
- storage=dict(type="str"),
- volume=dict(type="str"),
- size=dict(type="int"),
- host_path=dict(type="path"),
- mountpoint=dict(type="path", required=True),
- options=dict(type="dict"),
- ),
- required_together=[("storage", "size")],
- required_by={
- "volume": ("storage", "size"),
- },
- mutually_exclusive=[
- ("host_path", "storage"),
- ("host_path", "volume"),
- ("host_path", "size"),
- ],
- ),
- ip_address=dict(),
- ostype=dict(default='auto', choices=[
- 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged'
- ]),
- onboot=dict(type='bool'),
- features=dict(type='list', elements='str'),
- startup=dict(type='list', elements='str'),
- storage=dict(default='local'),
- cpuunits=dict(type='int'),
- nameserver=dict(),
- searchdomain=dict(),
- timeout=dict(type='int', default=30),
- update=dict(type='bool', default=False),
- force=dict(type='bool', default=False),
- purge=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'template']),
- pubkey=dict(type='str'),
- unprivileged=dict(type='bool', default=True),
- description=dict(type='str'),
- hookscript=dict(type='str'),
- timezone=dict(type='str'),
- clone=dict(type='int'),
- clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
- tags=dict(type='list', elements='str')
- )
- module_args.update(proxmox_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_if=[
- ('state', 'present', ['node', 'hostname']),
- # Require one of clone, ostemplate, or update. Together with mutually_exclusive this ensures that we
- # either clone a container or create a new one from a template file.
- ('state', 'present', ('clone', 'ostemplate', 'update'), True),
- ],
- required_together=[("api_token_id", "api_token_secret")],
- required_one_of=[("api_password", "api_token_id")],
- mutually_exclusive=[
- (
- "clone",
- "ostemplate",
- "update",
- ), # Creating a new container is done either by cloning an existing one, or based on a template.
- ("disk", "disk_volume", "storage"),
- ("mounts", "mount_volumes"),
- ],
- )
-
- proxmox = ProxmoxLxcAnsible(module)
-
- global VZ_TYPE
- VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc'
-
- state = module.params['state']
- vmid = module.params['vmid']
- node = module.params['node']
- disk = module.params['disk']
- cpus = module.params['cpus']
- memory = module.params['memory']
- swap = module.params['swap']
- storage = module.params['storage']
- hostname = module.params['hostname']
- if module.params['ostemplate'] is not None:
- template_store = module.params['ostemplate'].split(":")[0]
- timeout = module.params['timeout']
- clone = module.params['clone']
-
- # If vmid not set get the Next VM id from ProxmoxAPI
- # If hostname is set get the VM id from ProxmoxAPI
- if not vmid and state == 'present':
- vmid = proxmox.get_nextvmid()
- elif not vmid and hostname:
- vmid = proxmox.get_vmid(hostname)
- elif not vmid:
- module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
- # Create a new container
- if state == 'present' and clone is None:
- try:
- if proxmox.get_vm(vmid, ignore_missing=True):
- if module.params["update"]:
- try:
- proxmox.update_config(vmid, node, disk, cpus, memory, swap,
- cores=module.params["cores"],
- hostname=module.params["hostname"],
- netif=module.params["netif"],
- disk_volume=module.params["disk_volume"],
- mounts=module.params["mounts"],
- mount_volumes=module.params["mount_volumes"],
- ip_address=module.params["ip_address"],
- onboot=ansible_to_proxmox_bool(module.params["onboot"]),
- cpuunits=module.params["cpuunits"],
- nameserver=module.params["nameserver"],
- searchdomain=module.params["searchdomain"],
- features=",".join(module.params["features"])
- if module.params["features"] is not None
- else None,
- startup=",".join(module.params["startup"])
- if module.params["startup"] is not None
- else None,
- description=module.params["description"],
- hookscript=module.params["hookscript"],
- timezone=module.params["timezone"],
- tags=module.params["tags"])
- module.exit_json(
- changed=True,
- vmid=vmid,
- msg="Configured VM %s" % (vmid),
- )
- except Exception as e:
- module.fail_json(
- vmid=vmid,
- msg="Configuration of %s VM %s failed with exception: %s"
- % (VZ_TYPE, vmid, e),
- )
- if not module.params["force"]:
- module.exit_json(
- changed=False,
- vmid=vmid,
- msg="VM with vmid = %s is already exists" % vmid,
- )
- # If no vmid was passed, there cannot be another VM named 'hostname'
- if (not module.params['vmid'] and
- proxmox.get_vmid(hostname, ignore_missing=True) and
- not module.params['force']):
- vmid = proxmox.get_vmid(hostname)
- module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
- elif not proxmox.get_node(node):
- module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node)
- elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
- module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s"
- % (module.params['ostemplate'], node, template_store))
- except Exception as e:
- module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
-
- try:
- proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
- cores=module.params['cores'],
- pool=module.params['pool'],
- password=module.params['password'],
- hostname=module.params['hostname'],
- ostemplate=module.params['ostemplate'],
- netif=module.params['netif'],
- disk_volume=module.params["disk_volume"],
- mounts=module.params['mounts'],
- mount_volumes=module.params["mount_volumes"],
- ostype=module.params['ostype'],
- ip_address=module.params['ip_address'],
- onboot=ansible_to_proxmox_bool(module.params['onboot']),
- cpuunits=module.params['cpuunits'],
- nameserver=module.params['nameserver'],
- searchdomain=module.params['searchdomain'],
- force=ansible_to_proxmox_bool(module.params['force']),
- pubkey=module.params['pubkey'],
- features=",".join(module.params['features']) if module.params['features'] is not None else None,
- startup=",".join(module.params['startup']) if module.params['startup'] is not None else None,
- unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
- description=module.params['description'],
- hookscript=module.params['hookscript'],
- timezone=module.params['timezone'],
- tags=module.params['tags'])
-
- module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
- except Exception as e:
- module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
-
- # Clone a container
- elif state == 'present' and clone is not None:
- try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
- module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
- # If no vmid was passed, there cannot be another VM named 'hostname'
- if (not module.params['vmid'] and
- proxmox.get_vmid(hostname, ignore_missing=True) and
- not module.params['force']):
- vmid = proxmox.get_vmid(hostname)
- module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
- if not proxmox.get_vm(clone, ignore_missing=True):
- module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist")
- except Exception as e:
- module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
-
- try:
- proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
-
- module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone))
- except Exception as e:
- module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
-
- elif state == 'started':
- try:
- vm = proxmox.get_vm(vmid)
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid)
-
- if proxmox.start_instance(vm, vmid, timeout):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'stopped':
- try:
- vm = proxmox.get_vm(vmid)
-
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
- if module.params['force']:
- if proxmox.umount_instance(vm, vmid, timeout):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
- else:
- module.exit_json(changed=False, vmid=vmid,
- msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid)
-
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid)
-
- if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'template':
- try:
- vm = proxmox.get_vm(vmid)
-
- proxmox.convert_to_template(vm, vmid, timeout, force=module.params['force'])
- module.exit_json(changed=True, msg="VM %s is converted to template" % vmid)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e))
-
- elif state == 'restarted':
- try:
- vm = proxmox.get_vm(vmid)
-
- vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
- if vm_status in ['stopped', 'mounted']:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid)
-
- if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
- proxmox.start_instance(vm, vmid, timeout)):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'absent':
- if not vmid:
- module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname)
- try:
- vm = proxmox.get_vm(vmid, ignore_missing=True)
- if not vm:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid)
-
- vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
- if vm_status == 'running':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
-
- if vm_status == 'mounted':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
-
- delete_params = {}
-
- if module.params['purge']:
- delete_params['purge'] = 1
-
- taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
-
- while timeout:
- if proxmox.api_task_ok(vm['node'], taskid):
- module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid)
- timeout -= 1
- if timeout == 0:
- module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
- % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py
deleted file mode 100644
index 3a49a2f58a..0000000000
--- a/plugins/modules/proxmox_disk.py
+++ /dev/null
@@ -1,890 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2022, Castor Sky (@castorsky)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_disk
-short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster
-version_added: 5.7.0
-description:
- - Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
-author: "Castor Sky (@castorsky) "
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- name:
- description:
- - The unique name of the VM.
- - You can specify either O(name) or O(vmid) or both of them.
- type: str
- vmid:
- description:
- - The unique ID of the VM.
- - You can specify either O(vmid) or O(name) or both of them.
- type: int
- disk:
- description:
- - The disk key (V(unused[n]), V(ide[n]), V(sata[n]), V(scsi[n]) or V(virtio[n])) you want to operate on.
- - Disk buses (IDE, SATA and so on) have fixed ranges of V(n) that accepted by Proxmox API.
- - >
- For IDE: 0-3;
- for SCSI: 0-30;
- for SATA: 0-5;
- for VirtIO: 0-15;
- for Unused: 0-255.
- type: str
- required: true
- state:
- description:
- - Indicates desired state of the disk.
- - >
- O(state=present) can be used to create, replace disk or update options in existing disk. It will create missing
- disk or update options in existing one by default. See the O(create) parameter description to control behavior
- of this option.
- - Some updates on options (like O(cache)) are not being applied instantly and require VM restart.
- - >
- Use O(state=detached) to detach existing disk from VM but do not remove it entirely.
- When O(state=detached) and disk is V(unused[n]) it will be left in same state (not removed).
- - >
- O(state=moved) may be used to change backing storage for the disk in bounds of the same VM
- or to send the disk to another VM (using the same backing storage).
- - >
- O(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
- because shrinking disks is not supported by the PVE API and has to be done manually.
- - To entirely remove the disk from backing storage use O(state=absent).
- type: str
- choices: ['present', 'resized', 'detached', 'moved', 'absent']
- default: present
- create:
- description:
- - With O(create) flag you can control behavior of O(state=present).
- - When O(create=disabled) it will not create new disk (if not exists) but will update options in existing disk.
- - When O(create=regular) it will either create new disk (if not exists) or update options in existing disk.
- - When O(create=forced) it will always create new disk (if disk exists it will be detached and left unused).
- type: str
- choices: ['disabled', 'regular', 'forced']
- default: regular
- storage:
- description:
- - The drive's backing storage.
- - Used only when O(state) is V(present).
- type: str
- size:
- description:
- - Desired volume size in GB to allocate when O(state=present) (specify O(size) without suffix).
- - >
- New (or additional) size of volume when O(state=resized). With the V(+) sign
- the value is added to the actual size of the volume
- and without it, the value is taken as an absolute one.
- type: str
- bwlimit:
- description:
- - Override I/O bandwidth limit (in KB/s).
- - Used only when O(state=moved).
- type: int
- delete_moved:
- description:
- - Delete the original disk after successful copy.
- - By default the original disk is kept as unused disk.
- - Used only when O(state=moved).
- type: bool
- target_disk:
- description:
- - The config key the disk will be moved to on the target VM (for example, V(ide0) or V(scsi1)).
- - Default is the source disk key.
- - Used only when O(state=moved).
- type: str
- target_storage:
- description:
- - Move the disk to this storage when O(state=moved).
- - You can move between storages only in scope of one VM.
- - Mutually exclusive with O(target_vmid).
- - Consider increasing O(timeout) in case of large disk images or slow storage backend.
- type: str
- target_vmid:
- description:
- - The (unique) ID of the VM where disk will be placed when O(state=moved).
- - You can move disk between VMs only when the same storage is used.
- - Mutually exclusive with O(target_vmid).
- type: int
- timeout:
- description:
- - Timeout in seconds to wait for slow operations such as importing disk or moving disk between storages.
- - Used only when O(state) is V(present) or V(moved).
- type: int
- default: 600
- aio:
- description:
- - AIO type to use.
- type: str
- choices: ['native', 'threads', 'io_uring']
- backup:
- description:
- - Whether the drive should be included when making backups.
- type: bool
- bps_max_length:
- description:
- - Maximum length of total r/w I/O bursts in seconds.
- type: int
- bps_rd_max_length:
- description:
- - Maximum length of read I/O bursts in seconds.
- type: int
- bps_wr_max_length:
- description:
- - Maximum length of write I/O bursts in seconds.
- type: int
- cache:
- description:
- - The drive's cache mode.
- type: str
- choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
- cyls:
- description:
- - Force the drive's physical geometry to have a specific cylinder count.
- type: int
- detect_zeroes:
- description:
- - Control whether to detect and try to optimize writes of zeroes.
- type: bool
- discard:
- description:
- - Control whether to pass discard/trim requests to the underlying storage.
- type: str
- choices: ['ignore', 'on']
- format:
- description:
- - The drive's backing file's data format.
- type: str
- choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
- heads:
- description:
- - Force the drive's physical geometry to have a specific head count.
- type: int
- import_from:
- description:
- - Import volume from this existing one.
- - Volume string format
- - C(:/) or C(/)
- - Attention! Only root can use absolute paths.
- - This parameter is mutually exclusive with O(size).
- - Increase O(timeout) parameter when importing large disk images or using slow storage.
- type: str
- iops:
- description:
- - Maximum total r/w I/O in operations per second.
- - You can specify either total limit or per operation (mutually exclusive with O(iops_rd) and O(iops_wr)).
- type: int
- iops_max:
- description:
- - Maximum unthrottled total r/w I/O pool in operations per second.
- type: int
- iops_max_length:
- description:
- - Maximum length of total r/w I/O bursts in seconds.
- type: int
- iops_rd:
- description:
- - Maximum read I/O in operations per second.
- - You can specify either read or total limit (mutually exclusive with O(iops)).
- type: int
- iops_rd_max:
- description:
- - Maximum unthrottled read I/O pool in operations per second.
- type: int
- iops_rd_max_length:
- description:
- - Maximum length of read I/O bursts in seconds.
- type: int
- iops_wr:
- description:
- - Maximum write I/O in operations per second.
- - You can specify either write or total limit (mutually exclusive with O(iops)).
- type: int
- iops_wr_max:
- description:
- - Maximum unthrottled write I/O pool in operations per second.
- type: int
- iops_wr_max_length:
- description:
- - Maximum length of write I/O bursts in seconds.
- type: int
- iothread:
- description:
- - Whether to use iothreads for this drive (only for SCSI and VirtIO)
- type: bool
- mbps:
- description:
- - Maximum total r/w speed in megabytes per second.
- - Can be fractional but use with caution - fractionals less than 1 are not supported officially.
- - You can specify either total limit or per operation (mutually exclusive with O(mbps_rd) and O(mbps_wr)).
- type: float
- mbps_max:
- description:
- - Maximum unthrottled total r/w pool in megabytes per second.
- type: float
- mbps_rd:
- description:
- - Maximum read speed in megabytes per second.
- - You can specify either read or total limit (mutually exclusive with O(mbps)).
- type: float
- mbps_rd_max:
- description:
- - Maximum unthrottled read pool in megabytes per second.
- type: float
- mbps_wr:
- description:
- - Maximum write speed in megabytes per second.
- - You can specify either write or total limit (mutually exclusive with O(mbps)).
- type: float
- mbps_wr_max:
- description:
- - Maximum unthrottled write pool in megabytes per second.
- type: float
- media:
- description:
- - The drive's media type.
- type: str
- choices: ['cdrom', 'disk']
- iso_image:
- description:
- - The ISO image to be mounted on the specified in O(disk) CD-ROM.
- - O(media=cdrom) needs to be specified for this option to work.
- - "Image string format:"
- - V(:iso/) to mount ISO.
- - V(cdrom) to use physical CD/DVD drive.
- - V(none) to unmount image from existent CD-ROM or create empty CD-ROM drive.
- type: str
- version_added: 8.1.0
- queues:
- description:
- - Number of queues (SCSI only).
- type: int
- replicate:
- description:
- - Whether the drive should considered for replication jobs.
- type: bool
- rerror:
- description:
- - Read error action.
- type: str
- choices: ['ignore', 'report', 'stop']
- ro:
- description:
- - Whether the drive is read-only.
- type: bool
- scsiblock:
- description:
- - Whether to use scsi-block for full passthrough of host block device.
- - Can lead to I/O errors in combination with low memory or high memory fragmentation on host.
- type: bool
- secs:
- description:
- - Force the drive's physical geometry to have a specific sector count.
- type: int
- serial:
- description:
- - The drive's reported serial number, url-encoded, up to 20 bytes long.
- type: str
- shared:
- description:
- - Mark this locally-managed volume as available on all nodes.
- - This option does not share the volume automatically, it assumes it is shared already!
- type: bool
- snapshot:
- description:
- - Control qemu's snapshot mode feature.
- - If activated, changes made to the disk are temporary and will be discarded when the VM is shutdown.
- type: bool
- ssd:
- description:
- - Whether to expose this drive as an SSD, rather than a rotational hard disk.
- type: bool
- trans:
- description:
- - Force disk geometry bios translation mode.
- type: str
- choices: ['auto', 'lba', 'none']
- werror:
- description:
- - Write error action.
- type: str
- choices: ['enospc', 'ignore', 'report', 'stop']
- wwn:
- description:
- - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by V(0x).
- type: str
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-'''
-
-EXAMPLES = '''
-- name: Create new disk in VM (do not rewrite in case it exists already)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- name: vm-name
- disk: scsi3
- backup: true
- cache: none
- storage: local-zfs
- size: 5
- state: present
-
-- name: Create new disk in VM (force rewrite in case it exists already)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: scsi3
- format: qcow2
- storage: local
- size: 16
- create: forced
- state: present
-
-- name: Update existing disk
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: ide0
- backup: false
- ro: true
- aio: native
- state: present
-
-- name: Grow existing disk
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: sata4
- size: +5G
- state: resized
-
-- name: Detach disk (leave it unused)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- name: vm-name
- disk: virtio0
- state: detached
-
-- name: Move disk to another storage
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_password: secret
- vmid: 101
- disk: scsi7
- target_storage: local
- format: qcow2
- state: moved
-
-- name: Move disk from one VM to another
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: scsi7
- target_vmid: 201
- state: moved
-
-- name: Remove disk permanently
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_password: secret
- vmid: 101
- disk: scsi4
- state: absent
-
-- name: Mount ISO image on CD-ROM (create drive if missing)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: ide2
- media: cdrom
- iso_image: local:iso/favorite_distro_amd64.iso
- state: present
-'''
-
-RETURN = '''
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 101
-msg:
- description: A short message on what the module did.
- returned: always
- type: str
- sample: "Disk scsi3 created in VM 101"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec,
- ProxmoxAnsible)
-from re import compile, match, sub
-
-
-def disk_conf_str_to_dict(config_string):
- """
- Transform Proxmox configuration string for disk element into dictionary which has
- volume option parsed in '{ storage }:{ volume }' format and other options parsed
- in '{ option }={ value }' format. This dictionary will be compared afterward with
- attributes that user passed to this module in playbook.\n
- config_string examples:
- - local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G
- - local:iso/new-vm-ignition.iso,media=cdrom,size=70k
- - none,media=cdrom
- :param config_string: Retrieved from Proxmox API configuration string
- :return: Dictionary with volume option divided into parts ('volume_name', 'storage_name', 'volume') \n
- and other options as key:value.
- """
- config = config_string.split(',')
-
- # When empty CD-ROM drive present, the volume part of config string is "none".
- storage_volume = config.pop(0)
- if storage_volume in ["none", "cdrom"]:
- config_current = dict(
- volume=storage_volume,
- storage_name=None,
- volume_name=None,
- size=None,
- )
- else:
- storage_volume = storage_volume.split(':')
- storage_name = storage_volume[0]
- volume_name = storage_volume[1]
- config_current = dict(
- volume='%s:%s' % (storage_name, volume_name),
- storage_name=storage_name,
- volume_name=volume_name,
- )
-
- config.sort()
- for option in config:
- k, v = option.split('=')
- config_current[k] = v
-
- return config_current
-
-
-class ProxmoxDiskAnsible(ProxmoxAnsible):
- create_update_fields = [
- 'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length',
- 'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max',
- 'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max',
- 'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max',
- 'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot',
- 'ssd', 'trans', 'werror', 'wwn'
- ]
- supported_bus_num_ranges = dict(
- ide=range(0, 4),
- scsi=range(0, 31),
- sata=range(0, 6),
- virtio=range(0, 16),
- unused=range(0, 256)
- )
-
- def get_create_attributes(self):
- # Sanitize parameters dictionary:
- # - Remove not defined args
- # - Ensure True and False converted to int.
- # - Remove unnecessary parameters
- params = {
- k: int(v) if isinstance(v, bool) else v
- for k, v in self.module.params.items()
- if v is not None and k in self.create_update_fields
- }
- return params
-
- def create_disk(self, disk, vmid, vm, vm_config):
- """Create a disk in the specified virtual machine. Check if creation is required,
- and if so, compile the disk configuration and create it by updating the virtual
- machine configuration. After calling the API function, wait for the result.
-
- :param disk: ID of the disk in format "".
- :param vmid: ID of the virtual machine where the disk will be created.
- :param vm: Name of the virtual machine where the disk will be created.
- :param vm_config: Configuration of the virtual machine.
- :return: (bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- create = self.module.params['create']
- if create == 'disabled' and disk not in vm_config:
- # NOOP
- return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid)
-
- timeout_str = "Reached timeout. Last line in task before timeout: %s"
- if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
- # CREATE
- playbook_config = self.get_create_attributes()
- import_string = playbook_config.pop('import_from', None)
- iso_image = self.module.params.get('iso_image', None)
-
- if import_string:
- # When 'import_from' option is present in task options.
- config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
- timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
- ok_str = "Disk %s imported into VM %s"
- elif iso_image is not None:
- # disk=, media=cdrom, iso_image=
- config_str = iso_image
- ok_str = "CD-ROM was created on %s bus in VM %s"
- else:
- config_str = self.module.params["storage"]
- if not config_str:
- self.module.fail_json(msg="The storage option must be specified.")
- if self.module.params.get("media") != "cdrom":
- config_str += ":%s" % (self.module.params["size"])
- ok_str = "Disk %s created in VM %s"
- timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
-
- for k, v in playbook_config.items():
- config_str += ',%s=%s' % (k, v)
-
- disk_config_to_apply = {self.module.params["disk"]: config_str}
-
- if create in ['disabled', 'regular'] and disk in vm_config:
- # UPDATE
- ok_str = "Disk %s updated in VM %s"
- iso_image = self.module.params.get('iso_image', None)
-
- proxmox_config = disk_conf_str_to_dict(vm_config[disk])
- # 'import_from' fails on disk updates
- playbook_config = self.get_create_attributes()
- playbook_config.pop('import_from', None)
-
- # Begin composing configuration string
- if iso_image is not None:
- config_str = iso_image
- else:
- config_str = proxmox_config["volume"]
- # Append all mandatory fields from playbook_config
- for k, v in playbook_config.items():
- config_str += ',%s=%s' % (k, v)
-
- # Append to playbook_config fields which are constants for disk images
- for option in ['size', 'storage_name', 'volume', 'volume_name']:
- playbook_config.update({option: proxmox_config[option]})
- # CD-ROM is special disk device and its disk image is subject to change
- if iso_image is not None:
- playbook_config['volume'] = iso_image
- # Values in params are numbers, but strings are needed to compare with disk_config
- playbook_config = {k: str(v) for k, v in playbook_config.items()}
-
- # Now compare old and new config to detect if changes are needed
- if proxmox_config == playbook_config:
- return False, "Disk %s is up to date in VM %s" % (disk, vmid)
-
- disk_config_to_apply = {self.module.params["disk"]: config_str}
-
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
-
- if task_success:
- return True, ok_str % (disk, vmid)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
-
- def move_disk(self, disk, vmid, vm, vm_config):
- """Call the `move_disk` API function that moves the disk to another storage and wait for the result.
-
- :param disk: ID of disk in format "".
- :param vmid: ID of virtual machine which disk will be moved.
- :param vm: Name of virtual machine which disk will be moved.
- :param vm_config: Virtual machine configuration.
- :return: (bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- disk_storage = disk_config["storage_name"]
-
- params = dict()
- params['disk'] = disk
- params['vmid'] = vmid
- params['bwlimit'] = self.module.params['bwlimit']
- params['storage'] = self.module.params['target_storage']
- params['target-disk'] = self.module.params['target_disk']
- params['target-vmid'] = self.module.params['target_vmid']
- params['format'] = self.module.params['format']
- params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0
- # Remove not defined args
- params = {k: v for k, v in params.items() if v is not None}
-
- if params.get('storage', False):
- # Check if the disk is already in the target storage.
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- if params['storage'] == disk_config['storage_name']:
- return False, "Disk %s already at %s storage" % (disk, disk_storage)
-
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
-
- if task_success:
- return True, "Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
-
- def resize_disk(self, disk, vmid, vm, vm_config):
- """Call the `resize` API function to change the disk size and wait for the result.
-
- :param disk: ID of disk in format "".
- :param vmid: ID of virtual machine which disk will be resized.
- :param vm: Name of virtual machine which disk will be resized.
- :param vm_config: Virtual machine configuration.
- :return: (Bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- size = self.module.params['size']
- if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size):
- self.module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size))
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- actual_size = disk_config['size']
- if size == actual_size:
- return False, "Disk %s is already %s size" % (disk, size)
-
- # Resize disk API endpoint has changed at v8.0: PUT method become async.
- version = self.version()
- pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
- if pve_major_version >= 8:
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
- if task_success:
- return True, "Disk %s resized in VM %s" % (disk, vmid)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg="Reached timeout while resizing disk. Last line in task before timeout: %s" %
- self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
- else:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
- return True, "Disk %s resized in VM %s" % (disk, vmid)
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- disk_args = dict(
- # Proxmox native parameters
- aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
- backup=dict(type='bool'),
- bps_max_length=dict(type='int'),
- bps_rd_max_length=dict(type='int'),
- bps_wr_max_length=dict(type='int'),
- cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
- cyls=dict(type='int'),
- detect_zeroes=dict(type='bool'),
- discard=dict(type='str', choices=['ignore', 'on']),
- format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
- heads=dict(type='int'),
- import_from=dict(type='str'),
- iops=dict(type='int'),
- iops_max=dict(type='int'),
- iops_max_length=dict(type='int'),
- iops_rd=dict(type='int'),
- iops_rd_max=dict(type='int'),
- iops_rd_max_length=dict(type='int'),
- iops_wr=dict(type='int'),
- iops_wr_max=dict(type='int'),
- iops_wr_max_length=dict(type='int'),
- iothread=dict(type='bool'),
- iso_image=dict(type='str'),
- mbps=dict(type='float'),
- mbps_max=dict(type='float'),
- mbps_rd=dict(type='float'),
- mbps_rd_max=dict(type='float'),
- mbps_wr=dict(type='float'),
- mbps_wr_max=dict(type='float'),
- media=dict(type='str', choices=['cdrom', 'disk']),
- queues=dict(type='int'),
- replicate=dict(type='bool'),
- rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
- ro=dict(type='bool'),
- scsiblock=dict(type='bool'),
- secs=dict(type='int'),
- serial=dict(type='str'),
- shared=dict(type='bool'),
- snapshot=dict(type='bool'),
- ssd=dict(type='bool'),
- trans=dict(type='str', choices=['auto', 'lba', 'none']),
- werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
- wwn=dict(type='str'),
-
- # Disk moving relates parameters
- bwlimit=dict(type='int'),
- target_storage=dict(type='str'),
- target_disk=dict(type='str'),
- target_vmid=dict(type='int'),
- delete_moved=dict(type='bool'),
- timeout=dict(type='int', default='600'),
-
- # Module related parameters
- name=dict(type='str'),
- vmid=dict(type='int'),
- disk=dict(type='str', required=True),
- storage=dict(type='str'),
- size=dict(type='str'),
- state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
- default='present'),
- create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
- )
-
- module_args.update(disk_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- required_if=[
- ('create', 'forced', ['storage']),
- ('state', 'resized', ['size']),
- ],
- required_by={
- 'target_disk': 'target_vmid',
- 'mbps_max': 'mbps',
- 'mbps_rd_max': 'mbps_rd',
- 'mbps_wr_max': 'mbps_wr',
- 'bps_max_length': 'mbps_max',
- 'bps_rd_max_length': 'mbps_rd_max',
- 'bps_wr_max_length': 'mbps_wr_max',
- 'iops_max': 'iops',
- 'iops_rd_max': 'iops_rd',
- 'iops_wr_max': 'iops_wr',
- 'iops_max_length': 'iops_max',
- 'iops_rd_max_length': 'iops_rd_max',
- 'iops_wr_max_length': 'iops_wr_max',
- 'iso_image': 'media',
- },
- supports_check_mode=False,
- mutually_exclusive=[
- ('target_vmid', 'target_storage'),
- ('mbps', 'mbps_rd'),
- ('mbps', 'mbps_wr'),
- ('iops', 'iops_rd'),
- ('iops', 'iops_wr'),
- ('import_from', 'size'),
- ]
- )
-
- proxmox = ProxmoxDiskAnsible(module)
-
- disk = module.params['disk']
- # Verify disk name has appropriate name
- disk_regex = compile(r'^([a-z]+)([0-9]+)$')
- disk_bus = sub(disk_regex, r'\1', disk)
- disk_number = int(sub(disk_regex, r'\2', disk))
- if disk_bus not in proxmox.supported_bus_num_ranges:
- proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus)
- elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]:
- bus_range = proxmox.supported_bus_num_ranges[disk_bus]
- proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1]))
-
- name = module.params['name']
- state = module.params['state']
- vmid = module.params['vmid'] or proxmox.get_vmid(name)
-
- # Ensure VM id exists and retrieve its config
- vm = None
- vm_config = None
- try:
- vm = proxmox.get_vm(vmid)
- vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
- except Exception as e:
- proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e)))
-
- # Do not try to perform actions on missing disk
- if disk not in vm_config and state in ['resized', 'moved']:
- module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid))
-
- if state == 'present':
- try:
- changed, message = proxmox.create_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e)))
-
- elif state == 'detached':
- try:
- if disk_bus == 'unused':
- module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid))
- if disk not in vm_config:
- module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid))
- proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0)
- module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid))
- except Exception as e:
- module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'moved':
- try:
- changed, message = proxmox.move_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'resized':
- try:
- changed, message = proxmox.resize_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'absent':
- try:
- if disk not in vm_config:
- module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid))
- proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1)
- module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py
deleted file mode 100644
index f3ff212bff..0000000000
--- a/plugins/modules/proxmox_domain_info.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_domain_info
-short_description: Retrieve information about one or more Proxmox VE domains
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE domains.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- domain:
- description:
- - Restrict results to a specific authentication realm.
- aliases: ['realm', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-'''
-
-
-EXAMPLES = '''
-- name: List existing domains
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_domains
-
-- name: Retrieve information about the pve domain
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- domain: pve
- register: proxmox_domain_pve
-'''
-
-
-RETURN = '''
-proxmox_domains:
- description: List of authentication domains.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the realm.
- returned: on success
- type: str
- realm:
- description: Realm name.
- returned: on success
- type: str
- type:
- description: Realm type.
- returned: on success
- type: str
- digest:
- description: Realm hash.
- returned: on success, can be absent
- type: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
- def get_domain(self, realm):
- try:
- domain = self.proxmox_api.access.domains.get(realm)
- except Exception:
- self.module.fail_json(msg="Domain '%s' does not exist" % realm)
- domain['realm'] = realm
- return domain
-
- def get_domains(self):
- domains = self.proxmox_api.access.domains.get()
- return domains
-
-
-def proxmox_domain_info_argument_spec():
- return dict(
- domain=dict(type='str', aliases=['realm', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- domain_info_args = proxmox_domain_info_argument_spec()
- module_args.update(domain_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxDomainInfoAnsible(module)
- domain = module.params['domain']
-
- if domain:
- domains = [proxmox.get_domain(realm=domain)]
- else:
- domains = proxmox.get_domains()
- result['proxmox_domains'] = domains
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py
deleted file mode 100644
index eda1fe04d8..0000000000
--- a/plugins/modules/proxmox_group_info.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_group_info
-short_description: Retrieve information about one or more Proxmox VE groups
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE groups
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- group:
- description:
- - Restrict results to a specific group.
- aliases: ['groupid', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-'''
-
-
-EXAMPLES = '''
-- name: List existing groups
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_groups
-
-- name: Retrieve information about the admin group
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- group: admin
- register: proxmox_group_admin
-'''
-
-
-RETURN = '''
-proxmox_groups:
- description: List of groups.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the group.
- returned: on success, can be absent
- type: str
- groupid:
- description: Group name.
- returned: on success
- type: str
- users:
- description: List of users in the group.
- returned: on success
- type: list
- elements: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
- def get_group(self, groupid):
- try:
- group = self.proxmox_api.access.groups.get(groupid)
- except Exception:
- self.module.fail_json(msg="Group '%s' does not exist" % groupid)
- group['groupid'] = groupid
- return ProxmoxGroup(group)
-
- def get_groups(self):
- groups = self.proxmox_api.access.groups.get()
- return [ProxmoxGroup(group) for group in groups]
-
-
-class ProxmoxGroup:
- def __init__(self, group):
- self.group = dict()
- # Data representation is not the same depending on API calls
- for k, v in group.items():
- if k == 'users' and isinstance(v, str):
- self.group['users'] = v.split(',')
- elif k == 'members':
- self.group['users'] = group['members']
- else:
- self.group[k] = v
-
-
-def proxmox_group_info_argument_spec():
- return dict(
- group=dict(type='str', aliases=['groupid', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- group_info_args = proxmox_group_info_argument_spec()
- module_args.update(group_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxGroupInfoAnsible(module)
- group = module.params['group']
-
- if group:
- groups = [proxmox.get_group(groupid=group)]
- else:
- groups = proxmox.get_groups()
- result['proxmox_groups'] = [group.group for group in groups]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py
deleted file mode 100644
index 771ddd902f..0000000000
--- a/plugins/modules/proxmox_kvm.py
+++ /dev/null
@@ -1,1664 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016, Abdoul Bah (@helldorado)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_kvm
-short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster
-description:
- - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
- - Since community.general 4.0.0 on, there are no more default values, see O(proxmox_default_behavior).
-author: "Abdoul Bah (@helldorado) "
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- archive:
- description:
- - Specify a path to an archive to restore (instead of creating or cloning a VM).
- type: str
- version_added: 6.5.0
- acpi:
- description:
- - Specify if ACPI should be enabled/disabled.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true).
- type: bool
- agent:
- description:
- - Specify if the QEMU Guest Agent should be enabled/disabled.
- - Since community.general 5.5.0, this can also be a string instead of a boolean.
- This allows to specify values such as V(enabled=1,fstrim_cloned_disks=1).
- type: str
- args:
- description:
- - Pass arbitrary arguments to kvm.
- - This option is for experts only!
- - If O(proxmox_default_behavior) is set to V(compatibility), this option has a default of
- V(-serial unix:/var/run/qemu-server/.serial,server,nowait).
- type: str
- autostart:
- description:
- - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
- type: bool
- balloon:
- description:
- - Specify the amount of RAM for the VM in MB.
- - Using zero disables the balloon driver.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0).
- type: int
- bios:
- description:
- - Specify the BIOS implementation.
- type: str
- choices: ['seabios', 'ovmf']
- boot:
- description:
- - Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n).
- - For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0).
- - You can combine to set order.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(cnd).
- type: str
- bootdisk:
- description:
- - 'Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+).'
- type: str
- cicustom:
- description:
- - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
- type: str
- version_added: 1.3.0
- cipassword:
- description:
- - 'cloud-init: password of default user to create.'
- type: str
- version_added: 1.3.0
- citype:
- description:
- - 'cloud-init: Specifies the cloud-init configuration format.'
- - The default depends on the configured operating system type (V(ostype)).
- - We use the V(nocloud) format for Linux, and V(configdrive2) for Windows.
- type: str
- choices: ['nocloud', 'configdrive2']
- version_added: 1.3.0
- ciuser:
- description:
- - 'cloud-init: username of default user to create.'
- type: str
- version_added: 1.3.0
- clone:
- description:
- - Name of VM to be cloned. If O(vmid) is set, O(clone) can take an arbitrary value but is required for initiating the clone.
- type: str
- cores:
- description:
- - Specify number of cores per socket.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
- type: int
- cpu:
- description:
- - Specify emulated CPU type.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(kvm64).
- type: str
- cpulimit:
- description:
- - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
- - If the computer has 2 CPUs, it has total of '2' CPU time
- type: int
- cpuunits:
- description:
- - Specify CPU weight for a VM.
- - You can disable fair-scheduler configuration by setting this to 0
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000).
- type: int
- delete:
- description:
- - Specify a list of settings you want to delete.
- type: str
- description:
- description:
- - Specify the description for the VM. Only used on the configuration web interface.
- - This is saved as comment inside the configuration file.
- type: str
- digest:
- description:
- - Specify if to prevent changes if current configuration file has different SHA1 digest.
- - This can be used to prevent concurrent modifications.
- type: str
- efidisk0:
- description:
- - Specify a hash/dictionary of EFI disk options.
- - Requires O(bios=ovmf) to be set to be able to use it.
- type: dict
- suboptions:
- storage:
- description:
- - V(storage) is the storage identifier where to create the disk.
- type: str
- format:
- description:
- - V(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
- section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest
- version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: str
- efitype:
- description:
- - V(efitype) indicates the size of the EFI disk.
- - V(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
- - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable
- Secure Boot
- type: str
- choices:
- - 2m
- - 4m
- pre_enrolled_keys:
- description:
- - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware
- upon creation or not (0).
- - If set to V(1), Secure Boot will also be enabled by default when the VM is created.
- type: bool
- version_added: 4.5.0
- force:
- description:
- - Allow to force stop VM.
- - Can be used with states V(stopped), V(restarted), and V(absent).
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
- - Requires parameter O(archive).
- type: bool
- format:
- description:
- - Target drive's backing file's data format.
- - Used only with clone
- - Use O(format=unspecified) and O(full=false) for a linked clone.
- - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see
- U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format
- supported by the provided storage backend.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(qcow2).
- If O(proxmox_default_behavior) is set to V(no_defaults), not specifying this option is equivalent to setting it to V(unspecified).
- type: str
- choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
- freeze:
- description:
- - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
- type: bool
- full:
- description:
- - Create a full copy of all disk. This is always done when you clone a normal VM.
- - For VM templates, we try to create a linked clone by default.
- - Used only with clone
- type: bool
- default: true
- hookscript:
- description:
- - Script that will be executed during various steps in the containers lifetime.
- type: str
- version_added: 8.1.0
- hostpci:
- description:
- - Specify a hash/dictionary of map host pci devices into guest. O(hostpci='{"key":"value", "key":"value"}').
- - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
- - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
- - C(pcie=boolean) C(default=0) Choose the PCI-express bus (needs the q35 machine model).
- - C(rombar=boolean) C(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
- - C(x-vga=boolean) C(default=0) Enable vfio-vga device support.
- - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
- type: dict
- hotplug:
- description:
- - Selectively enable hotplug features.
- - This is a comma separated list of hotplug features V(network), V(disk), V(cpu), V(memory), and V(usb).
- - Value 0 disables hotplug completely and value 1 is an alias for the default V(network,disk,usb).
- type: str
- hugepages:
- description:
- - Enable/disable hugepages memory.
- type: str
- choices: ['any', '2', '1024']
- ide:
- description:
- - A hash/dictionary of volume used as IDE hard disk or CD-ROM. O(ide='{"key":"value", "key":"value"}').
- - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
- Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
- the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- ipconfig:
- description:
- - 'cloud-init: Set the IP configuration.'
- - A hash/dictionary of network ip configurations. O(ipconfig='{"key":"value", "key":"value"}').
- - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
- - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]").
- - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
- - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
- - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
- - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
- - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
- type: dict
- version_added: 1.3.0
- keyboard:
- description:
- - Sets the keyboard layout for VNC server.
- type: str
- kvm:
- description:
- - Enable/disable KVM hardware virtualization.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true).
- type: bool
- localtime:
- description:
- - Sets the real time clock to local time.
- - This is enabled by default if ostype indicates a Microsoft OS.
- type: bool
- lock:
- description:
- - Lock/unlock the VM.
- type: str
- choices: ['migrate', 'backup', 'snapshot', 'rollback']
- machine:
- description:
- - Specifies the Qemu machine type.
- - 'Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)).'
- type: str
- memory:
- description:
- - Memory size in MB for instance.
- - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512).
- type: int
- migrate:
- description:
- - Migrate the VM to O(node) if it is on another node.
- type: bool
- default: false
- version_added: 7.0.0
- migrate_downtime:
- description:
- - Sets maximum tolerated downtime (in seconds) for migrations.
- type: int
- migrate_speed:
- description:
- - Sets maximum speed (in MB/s) for migrations.
- - A value of 0 is no limit.
- type: int
- name:
- description:
- - Specifies the VM name. Name could be non-unique across the cluster.
- - Required only for O(state=present).
- - With O(state=present) if O(vmid) not provided and VM with name exists in the cluster then no changes will be made.
- type: str
- nameservers:
- description:
- - 'cloud-init: DNS server IP address(es).'
- - If unset, PVE host settings are used.
- type: list
- elements: str
- version_added: 1.3.0
- net:
- description:
- - A hash/dictionary of network interfaces for the VM. O(net='{"key":"value", "key":"value"}').
- - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
- - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
- - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified.
- - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
- - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
- - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
- type: dict
- newid:
- description:
- - VMID for the clone. Used only with clone.
- - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
- type: int
- numa:
- description:
- - A hash/dictionaries of NUMA topology. O(numa='{"key":"value", "key":"value"}').
- - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("cpu="